initial commit
odroid/postgres-operator-percona/helm/cluster.yaml (271 lines, executable file)
@@ -0,0 +1,271 @@
apiVersion: pgv2.percona.com/v2
kind: PerconaPGCluster
metadata:
  name: cluster1
#  finalizers:
#  - percona.com/delete-pvc
#  - percona.com/delete-ssl
spec:
  crVersion: 2.3.0
#  secrets:
#    customTLSSecret:
#      name: cluster1-cert
#    customReplicationTLSSecret:
#      name: replication1-cert

#  standby:
#    enabled: true
#    host: "<primary-ip>"
#    port: "<primary-port>"
#    repoName: repo1

#  openshift: true

#  users:
#    - name: rhino
#      databases:
#        - zoo
#      options: "SUPERUSER"
#      password:
#        type: ASCII
#      secretName: "rhino-credentials"

#  databaseInitSQL:
#    key: init.sql
#    name: cluster1-init-sql

#  pause: true
#  unmanaged: true

#  dataSource:
#    postgresCluster:
#      clusterName: cluster1
#      repoName: repo1
#      options:
#      - --type=time
#      - --target="2021-06-09 14:15:11-04"
#    pgbackrest:
#      stanza: db
#      configuration:
#      - secret:
#          name: pgo-s3-creds
#      global:
#        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
#      repo:
#        name: repo1
#        s3:
#          bucket: "my-bucket"
#          endpoint: "s3.ca-central-1.amazonaws.com"
#          region: "ca-central-1"

  image: perconalab/percona-postgresql-operator:main-ppg15-postgres
  imagePullPolicy: Always
  postgresVersion: 15
  port: 5432

  expose:
    annotations:
      my-annotation: percona-lb
    labels:
      my-label: percona-lb
    type: LoadBalancer

  instances:
  - name: instance1
    replicas: 3
#    resources:
#      limits:
#        cpu: 2.0
#        memory: 4Gi
#
#    sidecars:
#    - name: testcontainer
#      image: mycontainer1:latest
#    - name: testcontainer2
#      image: mycontainer1:latest
#
#    topologySpreadConstraints:
#    - maxSkew: 1
#      topologyKey: my-node-label
#      whenUnsatisfiable: DoNotSchedule
#      labelSelector:
#        matchLabels:
#          postgres-operator.crunchydata.com/instance-set: instance1
#
#    tolerations:
#    - effect: NoSchedule
#      key: role
#      operator: Equal
#      value: connection-poolers
#
#    priorityClassName: high-priority
#
#    walVolumeClaimSpec:
#      accessModes:
#      - "ReadWriteOnce"
#      resources:
#        requests:
#          storage: 1Gi
#
    dataVolumeClaimSpec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi

  proxy:
    pgBouncer:
      replicas: 3
      image: perconalab/percona-postgresql-operator:main-ppg15-pgbouncer
#      exposeSuperusers: true
#      resources:
#        limits:
#          cpu: 200m
#          memory: 128Mi
#
#      expose:
#        annotations:
#          my-annotation: value1
#        labels:
#          my-label: value2
#        type: LoadBalancer
#
#      affinity:
#        podAntiAffinity:
#          preferredDuringSchedulingIgnoredDuringExecution:
#          - weight: 1
#            podAffinityTerm:
#              labelSelector:
#                matchLabels:
#                  postgres-operator.crunchydata.com/cluster: keycloakdb
#                  postgres-operator.crunchydata.com/role: pgbouncer
#              topologyKey: kubernetes.io/hostname
#
#      tolerations:
#      - effect: NoSchedule
#        key: role
#        operator: Equal
#        value: connection-poolers
#
#      topologySpreadConstraints:
#      - maxSkew: 1
#        topologyKey: my-node-label
#        whenUnsatisfiable: ScheduleAnyway
#        labelSelector:
#          matchLabels:
#            postgres-operator.crunchydata.com/role: pgbouncer
#
#      sidecars:
#      - name: bouncertestcontainer1
#        image: mycontainer1:latest
#
#      customTLSSecret:
#        name: keycloakdb-pgbouncer.tls
#
#      config:
#        global:
#          pool_mode: transaction

  backups:
    pgbackrest:
#      metadata:
#        labels:
      image: perconalab/percona-postgresql-operator:main-ppg15-pgbackrest
#      configuration:
#      - secret:
#          name: cluster1-pgbackrest-secrets
#      jobs:
#        priorityClassName: high-priority
#        resources:
#          limits:
#            cpu: 200m
#            memory: 128Mi
#        tolerations:
#        - effect: NoSchedule
#          key: role
#          operator: Equal
#          value: connection-poolers
#
#      global:
#        repo1-retention-full: "14"
#        repo1-retention-full-type: time
#        repo1-path: /pgbackrest/postgres-operator/cluster1/repo1
#        repo1-cipher-type: aes-256-cbc
#        repo1-s3-uri-style: path
#        repo2-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo2
#        repo3-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo3
#        repo4-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo4
#      repoHost:
#        priorityClassName: high-priority
#
#        topologySpreadConstraints:
#        - maxSkew: 1
#          topologyKey: my-node-label
#          whenUnsatisfiable: ScheduleAnyway
#          labelSelector:
#            matchLabels:
#              postgres-operator.crunchydata.com/pgbackrest: ""
#        affinity:
#          podAntiAffinity:
#            preferredDuringSchedulingIgnoredDuringExecution:
#            - weight: 1
#              podAffinityTerm:
#                labelSelector:
#                  matchLabels:
#                    postgres-operator.crunchydata.com/cluster: keycloakdb
#                    postgres-operator.crunchydata.com/role: pgbouncer
#                topologyKey: kubernetes.io/hostname
#
      manual:
        repoName: repo1
        options:
        - --type=full
      repos:
      - name: repo1
        schedules:
          full: "0 0 * * 6"
#          differential: "0 1 * * 1-6"
        volume:
          volumeClaimSpec:
            accessModes:
            - ReadWriteOnce
            resources:
              requests:
                storage: 1Gi
#      - name: repo2
#        s3:
#          bucket: "<YOUR_AWS_S3_BUCKET_NAME>"
#          endpoint: "<YOUR_AWS_S3_ENDPOINT>"
#          region: "<YOUR_AWS_S3_REGION>"
#      - name: repo3
#        gcs:
#          bucket: "<YOUR_GCS_BUCKET_NAME>"
#      - name: repo4
#        azure:
#          container: "<YOUR_AZURE_CONTAINER>"
#
#    restore:
#      enabled: true
#      repoName: repo1
#      options:
#       PITR restore in place
#       - --type=time
#       - --target="2021-06-09 14:15:11-04"
#       restore individual databases
#       - --db-include=hippo

  pmm:
    enabled: true
    image: percona/pmm-client:2.37.0
#    imagePullPolicy: IfNotPresent
    secret: cluster1-pmm-secret
    serverHost: monitoring-service
#  patroni:
#    dynamicConfiguration:
#      postgresql:
#        parameters:
#          max_parallel_workers: 2
#          max_worker_processes: 2
#          shared_buffers: 1GB
#          work_mem: 2MB