initial commit

allard
2025-11-23 18:58:51 +01:00
commit 376a944abc
1553 changed files with 314731 additions and 0 deletions


@@ -0,0 +1,271 @@
apiVersion: pgv2.percona.com/v2
kind: PerconaPGCluster
metadata:
  name: cluster1
#  finalizers:
#  - percona.com/delete-pvc
#  - percona.com/delete-ssl
spec:
  crVersion: 2.3.0
#  secrets:
#    customTLSSecret:
#      name: cluster1-cert
#    customReplicationTLSSecret:
#      name: replication1-cert

#  standby:
#    enabled: true
#    host: "<primary-ip>"
#    port: "<primary-port>"
#    repoName: repo1

#  openshift: true

#  users:
#    - name: rhino
#      databases:
#        - zoo
#      options: "SUPERUSER"
#      password:
#        type: ASCII
#      secretName: "rhino-credentials"

#  databaseInitSQL:
#    key: init.sql
#    name: cluster1-init-sql

#  pause: true
#  unmanaged: true

#  dataSource:
#    postgresCluster:
#      clusterName: cluster1
#      repoName: repo1
#      options:
#      - --type=time
#      - --target="2021-06-09 14:15:11-04"
#    pgbackrest:
#      stanza: db
#      configuration:
#      - secret:
#          name: pgo-s3-creds
#      global:
#        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
#      repo:
#        name: repo1
#        s3:
#          bucket: "my-bucket"
#          endpoint: "s3.ca-central-1.amazonaws.com"
#          region: "ca-central-1"

  image: perconalab/percona-postgresql-operator:main-ppg15-postgres
  imagePullPolicy: Always
  postgresVersion: 15
  port: 5432

  expose:
    annotations:
      my-annotation: percona-lb
    labels:
      my-label: percona-lb
    type: LoadBalancer

  instances:
  - name: instance1
    replicas: 3
#    resources:
#      limits:
#        cpu: 2.0
#        memory: 4Gi
#
#    sidecars:
#    - name: testcontainer
#      image: mycontainer1:latest
#    - name: testcontainer2
#      image: mycontainer1:latest
#
#    topologySpreadConstraints:
#    - maxSkew: 1
#      topologyKey: my-node-label
#      whenUnsatisfiable: DoNotSchedule
#      labelSelector:
#        matchLabels:
#          postgres-operator.crunchydata.com/instance-set: instance1
#
#    tolerations:
#    - effect: NoSchedule
#      key: role
#      operator: Equal
#      value: connection-poolers
#
#    priorityClassName: high-priority
#
#    walVolumeClaimSpec:
#      accessModes:
#      - "ReadWriteOnce"
#      resources:
#        requests:
#          storage: 1Gi
#
    dataVolumeClaimSpec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi

  proxy:
    pgBouncer:
      replicas: 3
      image: perconalab/percona-postgresql-operator:main-ppg15-pgbouncer
#      exposeSuperusers: true
#      resources:
#        limits:
#          cpu: 200m
#          memory: 128Mi
#
#      expose:
#        annotations:
#          my-annotation: value1
#        labels:
#          my-label: value2
#        type: LoadBalancer
#
#      affinity:
#        podAntiAffinity:
#          preferredDuringSchedulingIgnoredDuringExecution:
#          - weight: 1
#            podAffinityTerm:
#              labelSelector:
#                matchLabels:
#                  postgres-operator.crunchydata.com/cluster: keycloakdb
#                  postgres-operator.crunchydata.com/role: pgbouncer
#              topologyKey: kubernetes.io/hostname
#
#      tolerations:
#      - effect: NoSchedule
#        key: role
#        operator: Equal
#        value: connection-poolers
#
#      topologySpreadConstraints:
#      - maxSkew: 1
#        topologyKey: my-node-label
#        whenUnsatisfiable: ScheduleAnyway
#        labelSelector:
#          matchLabels:
#            postgres-operator.crunchydata.com/role: pgbouncer
#
#      sidecars:
#      - name: bouncertestcontainer1
#        image: mycontainer1:latest
#
#      customTLSSecret:
#        name: keycloakdb-pgbouncer.tls
#
#      config:
#        global:
#          pool_mode: transaction

  backups:
    pgbackrest:
#      metadata:
#        labels:
      image: perconalab/percona-postgresql-operator:main-ppg15-pgbackrest
#      configuration:
#      - secret:
#          name: cluster1-pgbackrest-secrets
#      jobs:
#        priorityClassName: high-priority
#        resources:
#          limits:
#            cpu: 200m
#            memory: 128Mi
#        tolerations:
#        - effect: NoSchedule
#          key: role
#          operator: Equal
#          value: connection-poolers
#
#      global:
#        repo1-retention-full: "14"
#        repo1-retention-full-type: time
#        repo1-path: /pgbackrest/postgres-operator/cluster1/repo1
#        repo1-cipher-type: aes-256-cbc
#        repo1-s3-uri-style: path
#        repo2-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo2
#        repo3-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo3
#        repo4-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo4
#      repoHost:
#        priorityClassName: high-priority
#
#        topologySpreadConstraints:
#        - maxSkew: 1
#          topologyKey: my-node-label
#          whenUnsatisfiable: ScheduleAnyway
#          labelSelector:
#            matchLabels:
#              postgres-operator.crunchydata.com/pgbackrest: ""
#        affinity:
#          podAntiAffinity:
#            preferredDuringSchedulingIgnoredDuringExecution:
#            - weight: 1
#              podAffinityTerm:
#                labelSelector:
#                  matchLabels:
#                    postgres-operator.crunchydata.com/cluster: keycloakdb
#                    postgres-operator.crunchydata.com/role: pgbouncer
#                topologyKey: kubernetes.io/hostname
#
      manual:
        repoName: repo1
        options:
        - --type=full
      repos:
      - name: repo1
        schedules:
          full: "0 0 * * 6"
#          differential: "0 1 * * 1-6"
        volume:
          volumeClaimSpec:
            accessModes:
            - ReadWriteOnce
            resources:
              requests:
                storage: 1Gi
#      - name: repo2
#        s3:
#          bucket: "<YOUR_AWS_S3_BUCKET_NAME>"
#          endpoint: "<YOUR_AWS_S3_ENDPOINT>"
#          region: "<YOUR_AWS_S3_REGION>"
#      - name: repo3
#        gcs:
#          bucket: "<YOUR_GCS_BUCKET_NAME>"
#      - name: repo4
#        azure:
#          container: "<YOUR_AZURE_CONTAINER>"
#
#    restore:
#      enabled: true
#      repoName: repo1
#      options:
#       PITR restore in place
#      - --type=time
#      - --target="2021-06-09 14:15:11-04"
#       restore individual databases
#      - --db-include=hippo

  pmm:
    enabled: true
    image: percona/pmm-client:2.37.0
#    imagePullPolicy: IfNotPresent
    secret: cluster1-pmm-secret
    serverHost: monitoring-service
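    # note: the PMM Helm values in this repo name the PMM Service "percona-pmm";
    # serverHost must point at the PMM server Service, so the two need to agree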

#  patroni:
#    dynamicConfiguration:
#      postgresql:
#        parameters:
#          max_parallel_workers: 2
#          max_worker_processes: 2
#          shared_buffers: 1GB
#          work_mem: 2MB
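
# on-demand backup object, as a minimal sketch; the PerconaPGBackup kind and
# these fields are assumed from the operator v2.3.0 deploy/backup.yaml, and the
# manual: section above supplies the defaults:
#   apiVersion: pgv2.percona.com/v2
#   kind: PerconaPGBackup
#   metadata:
#     name: backup1
#   spec:
#     pgCluster: cluster1
#     repoName: repo1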


@@ -0,0 +1,2 @@
echo user: $(microk8s kubectl get secret cluster1-pguser-cluster1 -o jsonpath="{.data.user}" | base64 -d)
echo password: $(microk8s kubectl get secret cluster1-pguser-cluster1 -o jsonpath="{.data.password}" | base64 -d)
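# a possible follow-up (sketch): pguser secrets from this operator also carry a
# ready-made connection string under the "uri" key (assumed present), e.g.:
# psql "$(microk8s kubectl get secret cluster1-pguser-cluster1 -o jsonpath='{.data.uri}' | base64 -d)"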


@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: percona-pmm-tls
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`percona.alldcs.nl`)
      kind: Rule
      services:
        - name: percona-pmm
          port: 443
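  # note: certResolver assumes a resolver named "letsencrypt" is declared in
  # Traefik's static configuration; without it Traefik serves its default cert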
  tls:
    certResolver: letsencrypt


@@ -0,0 +1,28 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
  name: percona-route-tcp-tls
spec:
  entryPoints:
    - websecure
  routes:
    - match: HostSNI(`percona.alldcs.nl`)
      services:
        - name: percona-pmm
          port: 443
  tls:
    passthrough: true
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
  name: percona-route-tcp-http
spec:
  entryPoints:
    - web
  routes:
    # a non-TLS TCP router cannot inspect SNI, so Traefik requires the
    # catch-all HostSNI(`*`) on this entrypoint
    - match: HostSNI(`*`)
      services:
        - name: percona-pmm
          port: 80


@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: pmm-secret
  labels:
    app.kubernetes.io/name: pmm
type: Opaque
data:
  # base64-encoded password
  # encode a password with: `echo -n "admin" | base64`
  PMM_ADMIN_PASSWORD: YWRtaW4=
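# the same secret can also be created imperatively (sketch, label omitted):
#   microk8s kubectl create secret generic pmm-secret --from-literal=PMM_ADMIN_PASSWORD=admin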


@@ -0,0 +1,258 @@
## @section Percona Monitoring and Management (PMM) parameters
## Default values for PMM.
## This is a YAML-formatted file.
## Declare variables to be passed into your templates.

## PMM image version
## ref: https://hub.docker.com/r/percona/pmm-server/tags
## @param image.repository PMM image repository
## @param image.pullPolicy PMM image pull policy
## @param image.tag PMM image tag (immutable tags are recommended)
## @param image.imagePullSecrets Global Docker registry secret names as an array
##
image:
  repository: percona/pmm-server
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "2.39.0"
  imagePullSecrets: []

## PMM environment variables
## ref: https://docs.percona.com/percona-monitoring-and-management/setting-up/server/docker.html#environment-variables
##
pmmEnv:
  ## @param pmmEnv.DISABLE_UPDATES Disables the periodic check for new PMM versions and the ability to apply upgrades from the UI (keep disabled in Kubernetes, where updates are rolled out via helm/container updates)
  ##
  DISABLE_UPDATES: "1"
  # ENABLE_DBAAS: "1"
  # optional variables to integrate Grafana with an internal IdP; see also the secret section
  # GF_AUTH_GENERIC_OAUTH_ENABLED: 'true'
  # GF_AUTH_GENERIC_OAUTH_SCOPES: ''
  # GF_AUTH_GENERIC_OAUTH_AUTH_URL: ''
  # GF_AUTH_GENERIC_OAUTH_TOKEN_URL: ''
  # GF_AUTH_GENERIC_OAUTH_API_URL: ''
  # GF_AUTH_GENERIC_OAUTH_ALLOWED_DOMAINS: ''

## @param pmmResources Optional [resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) requested for the [PMM container](https://docs.percona.com/percona-monitoring-and-management/setting-up/server/index.html#set-up-pmm-server)
## pmmResources:
##   requests:
##     memory: "32Gi"
##     cpu: "8"
##   limits:
##     memory: "64Gi"
##     cpu: "32"
pmmResources: {}

## Readiness probe config
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param readyProbeConf.initialDelaySeconds Number of seconds after the container has started before the readiness probe is initiated
## @param readyProbeConf.periodSeconds How often (in seconds) to perform the probe
## @param readyProbeConf.failureThreshold When a probe fails, Kubernetes retries failureThreshold times before giving up
##
readyProbeConf:
  initialDelaySeconds: 1
  periodSeconds: 5
  failureThreshold: 6

## @section PMM secrets
##
secret:
  ## @param secret.name Name of the k8s secret that holds passwords and other secrets
  ##
  name: pmm-secret
  ## @param secret.create If true, the secret is generated by the Helm chart; otherwise it is expected to be created by the user.
  ##
  create: true
  ## @param secret.pmm_password Initial PMM password - applied only on the first deployment and ignored if PMM was already provisioned and merely restarted. If not set, a password is generated.
  ## E.g.
  ## pmm_password: admin
  ##
  ## To get the password, execute `kubectl get secret pmm-secret -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode`
  ##
  pmm_password: ""
  ##
  # GF_AUTH_GENERIC_OAUTH_CLIENT_ID optional client ID to integrate Grafana with an internal IdP; requires the other env vars defined under pmmEnv
  # GF_AUTH_GENERIC_OAUTH_CLIENT_ID:
  # GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET optional secret to integrate Grafana with an internal IdP; requires the other env vars defined under pmmEnv
  # GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET:

## @param certs Optional certificates. If not provided, PMM uses generated self-signed certificates.
## Provide your own signed SSL certificates like this:
## certs:
##   name: pmm-certs
##   files:
##     certificate.crt:
##     certificate.key:
##     ca-certs.pem:
##     dhparam.pem:
certs: {}

## @section PMM network configuration
## Service configuration
##
service:
  ## @param service.name Service name, i.e. the DNS name that monitored services send data to. pmm-client in the Percona operators uses `monitoring-service` by default.
  ##
  name: percona-pmm
  ## @param service.type Kubernetes Service type
  ##
  type: NodePort
  ## Ports 443 and/or 80
  ##
  ports:
    ## @param service.ports[0].port https port number
    - port: 443
      ## @param service.ports[0].targetPort target port to map for statefulset and ingress
      targetPort: https
      ## @param service.ports[0].protocol protocol for https
      protocol: TCP
      ## @param service.ports[0].name port name
      name: https
    ## @param service.ports[1].port http port number
    - port: 80
      ## @param service.ports[1].targetPort target port to map for statefulset and ingress
      targetPort: http
      ## @param service.ports[1].protocol protocol for http
      protocol: TCP
      ## @param service.ports[1].name port name
      name: http

## Ingress controller configuration
##
ingress:
  ## @param ingress.enabled -- Enable ingress controller resource
  enabled: false
  ## @param ingress.nginxInc -- Use the ingress controller from NGINX Inc
  nginxInc: false
  ## @param ingress.annotations -- Ingress annotations configuration
  annotations: {}
    ## kubernetes.io/ingress.class: nginx
    ## kubernetes.io/tls-acme: "true"
    ## nginx proxy to https
    ## nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
  ## @param ingress.community.annotations -- Ingress annotations configuration for the community-managed ingress (nginxInc = false)
  community:
    annotations: {}
      ## kubernetes.io/ingress.class: nginx
      ## kubernetes.io/tls-acme: "true"
  ## @param ingress.ingressClassName -- Sets the ingress controller class name to use.
  ingressClassName: ""
  ## Ingress resource hostnames and path mappings
  hosts:
    ## @param ingress.hosts[0].host hostname
    - host: chart-example.local
      ## @param ingress.hosts[0].paths path mapping
      paths: []
  ## @param ingress.pathType -- How ingress paths should be treated.
  pathType: Prefix
  ## @param ingress.tls -- Ingress TLS configuration
  tls: []
  ## - secretName: chart-example-tls
  ##   hosts:
  ##     - chart-example.local

## @section PMM storage configuration
## Claiming storage for PMM using Persistent Volume Claims (PVC)
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
##
storage:
  ## @param storage.name name of the PVC
  name: pmm-storage
  ## @param storage.storageClassName optional PMM data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  storageClassName: ""
  ##
  ## @param storage.size size of storage; [depends](https://docs.percona.com/percona-monitoring-and-management/setting-up/server/index.html#set-up-pmm-server) on the number of monitored services and data retention
  ##
  size: 10Gi
  ##
  ## @param storage.dataSource VolumeSnapshot to start from
  ##
  dataSource: {}
  ##   name: before-vX.Y.Z-upgrade
  ##   kind: VolumeSnapshot
  ##   apiGroup: snapshot.storage.k8s.io
  ##
  ## @param storage.selector select an existing PersistentVolume
  ##
  selector: {}
  ##   matchLabels:
  ##     release: "stable"
  ##   matchExpressions:
  ##     - key: environment
  ##       operator: In
  ##       values:
  ##         - dev

## @section PMM kubernetes configurations
## @param nameOverride String to partially override the common.names.fullname template (the release name is prepended)
##
nameOverride: ""
## @param extraLabels Labels to add to all deployed objects
##
extraLabels: {}

## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
## @param serviceAccount.create Specifies whether a ServiceAccount should be created
## @param serviceAccount.annotations Annotations for the service account. Evaluated as a template. Only used if `create` is `true`.
## @param serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template.
##
serviceAccount:
  create: true
  annotations: {}
  name: "pmm-service-account"

## @param podAnnotations Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}

## @param podSecurityContext Configure Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## E.g.
## podSecurityContext:
##   fsGroup: 2000
##
podSecurityContext: {}

## @param securityContext Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## securityContext.capabilities The capabilities to add/drop when running containers
## securityContext.runAsUser Set the pmm container's Security Context runAsUser
## securityContext.runAsNonRoot Set the pmm container's Security Context runAsNonRoot
## E.g.
## securityContext:
##   capabilities:
##     drop:
##       - ALL
##   readOnlyRootFilesystem: true
##   runAsNonRoot: true
##   runAsUser: 1000
securityContext: {}

## @param nodeSelector Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## @param tolerations Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []

## @param affinity Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
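
## deployment sketch, assuming the pmm chart from Percona's Helm repository:
##   helm repo add percona https://percona.github.io/percona-helm-charts/
##   helm install pmm percona/pmm -f values.yaml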