initial commit

allard
2025-11-23 18:58:51 +01:00
commit 376a944abc
1553 changed files with 314731 additions and 0 deletions

odroid/README.md Normal file

@@ -0,0 +1 @@
These are all services on the PROD cluster:

odroid/catalog-info.yaml Normal file

@@ -0,0 +1,28 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: ODROID-cluster
namespace: default
description: All deployments of the ODROID cluster
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://github.com/AllardKrings/kubernetes/odroid
title: AllardDCS Kubernetes Configuration
docs:
- url: ./README.md
spec:
type: service
lifecycle: production
owner: group:default/allarddcs
- ./redis/catalog-info.yaml
- ./postgres-operator-percona/catalog-info.yaml
- ./nginx/catalog-info.yaml
- ./mongodb-operator/catalog-info.yaml
- ./traefik/catalog-info.yaml
- ./cnpg/catalog-info.yaml
- ./postgres-operator-zalando/catalog-info.yaml
- ./kubernetes/catalog-info.yaml
- ./pgadmin/catalog-info.yaml
- ./phpmyadmin/catalog-info.yaml
- ./minio/catalog-info.yaml

odroid/cnpg/README.md Executable file

@@ -0,0 +1,19 @@
#Installation:
============
kubectl apply --server-side -f \
https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.0.yaml
#Create the cluster:
----------------
kubectl apply -f postgres15.yaml
#Monitoring
-----------
kubectl create ns monitoring
helm upgrade --install \
-f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml \
prometheus-community \
prometheus-community/kube-prometheus-stack -n monitoring
kubectl apply -n monitoring -f \
https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/prometheusrule.yaml
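#Verification
-------------
A quick sketch to confirm the install; cnpg-system is the namespace created by the operator manifest above:
kubectl get deployment -n cnpg-system cnpg-controller-manager
kubectl get clusters.postgresql.cnpg.io -A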


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-cnpg
title: Cnpg (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

odroid/cnpg/cnpg-1.23.2.yaml Normal file

File diff suppressed because it is too large.


@@ -0,0 +1,6 @@
microk8s kubectl delete secret minio-creds -n postgres
microk8s kubectl create secret generic minio-creds \
--from-literal=MINIO_ACCESS_KEY=Gudh6fKAlGv5PFWxLrCS \
--from-literal=MINIO_SECRET_KEY=L2CxDKJAvXS2h0KyWWX3fu9twiVIzR1tZpoEYINl \
--from-literal=REGION=us-east \
-n postgres
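# optional sanity check (a quick sketch): confirm the secret exists before the Cluster manifests reference it
microk8s kubectl get secret minio-creds -n postgres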


@@ -0,0 +1,7 @@
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
name: pg-backup-example
spec:
cluster:
name: pg-backup
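# To run this on-demand backup and check its status (a quick sketch):
#   kubectl apply -f <this file>
#   kubectl get backup pg-backup-example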


@@ -0,0 +1,47 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: post-init-sql-configmap
data:
configmap.sql: |
create table configmaps (i integer);
insert into configmaps (select generate_series(1,10000));
---
apiVersion: v1
kind: Secret
metadata:
name: post-init-sql-secret
stringData:
secret.sql: |
create table secrets (i integer);
insert into secrets (select generate_series(1,10000));
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: cluster-example-initdb
spec:
instances: 3
bootstrap:
initdb:
database: appdb
owner: appuser
postInitSQL:
- create table numbers (i integer)
- insert into numbers (select generate_series(1,10000))
postInitTemplateSQL:
- create extension intarray
postInitApplicationSQL:
- create table application_numbers (i integer)
- insert into application_numbers (select generate_series(1,10000))
postInitApplicationSQLRefs:
configMapRefs:
- name: post-init-sql-configmap
key: configmap.sql
secretRefs:
- name: post-init-sql-secret
key: secret.sql
storage:
size: 1Gi


@@ -0,0 +1,25 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: postgresql-pvc-template
spec:
instances: 3
# Example of rolling update strategy:
# - unsupervised: automated update of the primary once all
# replicas have been upgraded (default)
# - supervised: requires manual supervision to perform
# the switchover of the primary
primaryUpdateStrategy: unsupervised
# Persistent storage configuration
storage:
size: 1Gi
pvcTemplate:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: standard
volumeMode: Filesystem


@@ -0,0 +1,32 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: pg-backup
spec:
instances: 3
# Example of rolling update strategy:
# - unsupervised: automated update of the primary once all
# replicas have been upgraded (default)
# - supervised: requires manual supervision to perform
# the switchover of the primary
primaryUpdateStrategy: unsupervised
# Persistent storage configuration
storage:
storageClass: standard
size: 1Gi
# Backup properties
backup:
barmanObjectStore:
destinationPath: s3://BUCKET_NAME/path/to/folder
s3Credentials:
accessKeyId:
name: aws-creds
key: ACCESS_KEY_ID
secretAccessKey:
name: aws-creds
key: ACCESS_SECRET_KEY
wal:
compression: gzip

File diff suppressed because it is too large.


@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
name: grafana-lb
namespace: monitoring
spec:
externalTrafficPolicy: Cluster
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ports:
- nodePort: 30093
port: 3000
protocol: TCP
targetPort: 3000
selector:
app.kubernetes.io/instance: prometheus-community
app.kubernetes.io/name: grafana
type: LoadBalancer
status:
loadBalancer:


@@ -0,0 +1,8 @@
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
name: postgres13-backup
namespace: postgres
spec:
cluster:
name: postgres13


@@ -0,0 +1,50 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: postgres13
namespace: postgres
spec:
imageName: ghcr.io/cloudnative-pg/postgresql:13.14-3
instances: 3
superuserSecret:
name: superuser-secret
bootstrap:
initdb:
postInitSQL:
- CREATE USER admin WITH PASSWORD 'Postgres01@'
- ALTER USER admin WITH SUPERUSER
- CREATE USER harbor WITH PASSWORD 'harbor'
- CREATE DATABASE harbor OWNER harbor
storage:
size: 5Gi
monitoring:
enablePodMonitor: true
backup:
barmanObjectStore:
destinationPath: 's3://backups/'
endpointURL: 'http://minio.postgres:9000'
s3Credentials:
accessKeyId:
name: minio-creds
key: MINIO_ACCESS_KEY
secretAccessKey:
name: minio-creds
key: MINIO_SECRET_KEY
retentionPolicy: "1d"
---
apiVersion: v1
kind: Service
metadata:
name: postgres13-lb
namespace: postgres
spec:
selector:
cnpg.io/cluster: postgres13
role: primary
ports:
- port: 5432
targetPort: 5432
type: LoadBalancer


@@ -0,0 +1,46 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: postgres13-1
namespace: postgres
spec:
instances: 3
imageName: ghcr.io/cloudnative-pg/postgresql:13.14-3
superuserSecret:
name: superuser-secret
storage:
size: 5Gi
storageClass: nfs-client
monitoring:
enablePodMonitor: true
backup:
barmanObjectStore:
destinationPath: 's3://backups/'
endpointURL: 'http://minio.postgres:9000'
s3Credentials:
accessKeyId:
name: minio-creds
key: MINIO_ACCESS_KEY
secretAccessKey:
name: minio-creds
key: MINIO_SECRET_KEY
retentionPolicy: "1d"
bootstrap:
recovery:
source: postgres13
externalClusters:
- name: postgres13
barmanObjectStore:
destinationPath: 's3://backups/'
endpointURL: 'http://minio.postgres:9000'
s3Credentials:
accessKeyId:
name: minio-creds
key: MINIO_ACCESS_KEY
secretAccessKey:
name: minio-creds
key: MINIO_SECRET_KEY


@@ -0,0 +1,46 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: postgres13
namespace: postgres
spec:
instances: 3
imageName: ghcr.io/cloudnative-pg/postgresql:13.14-3
superuserSecret:
name: superuser-secret
storage:
size: 5Gi
storageClass: nfs-client
monitoring:
enablePodMonitor: true
backup:
barmanObjectStore:
destinationPath: 's3://backups/'
endpointURL: 'http://minio.postgres:9000'
s3Credentials:
accessKeyId:
name: minio-creds
key: MINIO_ACCESS_KEY
secretAccessKey:
name: minio-creds
key: MINIO_SECRET_KEY
retentionPolicy: "1d"
bootstrap:
recovery:
source: postgres13-1
externalClusters:
- name: postgres13-1
barmanObjectStore:
destinationPath: 's3://backups/'
endpointURL: 'http://minio.postgres:9000'
s3Credentials:
accessKeyId:
name: minio-creds
key: MINIO_ACCESS_KEY
secretAccessKey:
name: minio-creds
key: MINIO_SECRET_KEY


@@ -0,0 +1,9 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: postgres13-backup
spec:
schedule: "0 0 14 * * *"
backupOwnerReference: self
cluster:
name: postgres13


@@ -0,0 +1,8 @@
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
name: postgres14-backup
namespace: postgres
spec:
cluster:
name: postgres14


@@ -0,0 +1,50 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: postgres14
namespace: postgres
spec:
imageName: ghcr.io/cloudnative-pg/postgresql:14.11-3
instances: 3
superuserSecret:
name: superuser-secret
bootstrap:
initdb:
postInitSQL:
- CREATE USER admin WITH PASSWORD 'Postgres01@'
- ALTER USER admin WITH SUPERUSER
- CREATE USER harbor WITH PASSWORD 'harbor'
- CREATE DATABASE harbor OWNER harbor
storage:
size: 5Gi
monitoring:
enablePodMonitor: true
backup:
barmanObjectStore:
destinationPath: 's3://backups/'
endpointURL: 'http://minio.postgres:9000'
s3Credentials:
accessKeyId:
name: minio-creds
key: MINIO_ACCESS_KEY
secretAccessKey:
name: minio-creds
key: MINIO_SECRET_KEY
retentionPolicy: "1d"
---
apiVersion: v1
kind: Service
metadata:
name: postgres14-lb
namespace: postgres
spec:
selector:
cnpg.io/cluster: postgres14
role: primary
ports:
- port: 5432
targetPort: 5432
type: LoadBalancer

odroid/cnpg/script.sh Executable file

@@ -0,0 +1,9 @@
#!/bin/bash
# Label every PVC of the CNPG cluster with its instance role (primary/replica),
# then delete the cluster pods so they are recreated with the updated labels.
CLUSTER_NAME=postgres13
NAMESPACE=postgres
PRIMARY=$(kubectl get cluster -n "$NAMESPACE" "$CLUSTER_NAME" -o jsonpath='{.status.currentPrimary}')
for pvc in $(kubectl get pvc -n "$NAMESPACE" -l "cnpg.io/cluster=$CLUSTER_NAME" -o name); do
  # the primary's PVC carries the same name as the primary instance
  ROLE=$([ "$pvc" = "persistentvolumeclaim/$PRIMARY" ] && echo primary || echo replica)
  kubectl label -n "$NAMESPACE" "$pvc" --overwrite "cnpg.io/instanceRole=$ROLE" "role=$ROLE"
done
kubectl delete pod -n "$NAMESPACE" -l "cnpg.io/cluster=$CLUSTER_NAME"


@@ -0,0 +1,8 @@
apiVersion: v1
data:
username: cG9zdGdyZXMK
password: UG9zdGdyZXMwMUA=
kind: Secret
metadata:
name: superuser-secret
type: kubernetes.io/basic-auth

odroid/cnpg/tenant.txt Normal file

@@ -0,0 +1,2 @@
LyxDkhkSaQvoLUfb
ew3h3CEilTbGu6rpGG918zl0LdnVP32s

odroid/grafana/README.md Normal file

@@ -0,0 +1,6 @@
#Installing the Grafana dashboard:
Log in to the Grafana GUI: admin / prom-operator
Create a new dashboard.
Import the dashboard with ID 20417.
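If the IngressRoute or LoadBalancer is not reachable yet, a port-forward to the Grafana service is a quick alternative (a sketch; the service name comes from the IngressRoute below):
kubectl -n monitoring port-forward svc/prometheus-community-grafana 3000:80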


@@ -0,0 +1,21 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: grafana-odroid-alldcs
namespace: monitoring
spec:
entryPoints:
- websecure
routes:
- match: Host(`grafana-odroid.alldcs.nl`)
kind: Rule
services:
- name: prometheus-community-grafana
port: 80
tls:
certResolver: letsencrypt

odroid/kubernetes/README.md Executable file

@@ -0,0 +1,17 @@
1) microk8s enable dashboard
2) create the service account: kubectl apply -f ServiceAccount.yaml
3) create the clusterrolebinding: kubectl apply -f ClusterRoleBinding.yaml
4) create the ingressroute: kubectl apply -f Ingressroute-tls.yaml
5) generate a token:
kubectl -n kube-system create token admin-user --duration=8544h
Reinstallation:
After a reinstallation you must copy the config again, otherwise the certificate no longer matches:
sudo cp -i /var/snap/microk8s/current/credentials/client.config ${HOME}/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
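Verification (a quick sketch; the resource names come from the manifests in this directory):
kubectl -n kube-system get serviceaccount admin-user
kubectl get clusterrolebinding admin-user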

odroid/kubernetes/TIPS.md Executable file

@@ -0,0 +1,38 @@
#If a PVC stays stuck in the "Terminating" state, the following command
#can help:
kubectl patch pvc {PVC_NAME} -p '{"metadata":{"finalizers":null}}'
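#For example, with the nginx-pvc claim defined elsewhere in this repo:
kubectl patch pvc nginx-pvc -p '{"metadata":{"finalizers":null}}'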
#Switching context:
kubectl config set-context --current --namespace=tektontutorial
#If running kubectl gives "connection refused",
#the following commands can help:
sudo microk8s.refresh-certs --cert ca.crt
sudo microk8s.refresh-certs --cert server.crt
Adjust the cluster name:
nano /var/snap/microk8s/current/credentials/client.config
Then regenerate the certificates:
sudo microk8s.refresh-certs --cert ca.crt
sudo microk8s.refresh-certs --cert server.crt
Regenerate the kubectl configuration:
microk8s.kubectl config view --raw > $HOME/.kube/config
#metallb speaker permission errors:
#add the line "network," to the profile, then reload it:
sudo nano /etc/apparmor.d/cri-containerd.apparmor.d
network,
sudo apparmor_parser -r /etc/apparmor.d/cri-containerd.apparmor.d
#Full disk: remove all cached container images:
sudo microk8s ctr images list -q | xargs -r sudo microk8s ctr images rm


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-kubernetes
title: Kubernetes (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
email: admin@allarddcs.nl
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-account-key
solvers:
- http01:
ingress:
class: traefik
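# Check the issuer's ACME registration status (a quick sketch):
#   kubectl describe clusterissuer letsencrypt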


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system


@@ -0,0 +1 @@
microk8s kubectl -n kube-system create token admin-user --duration=8544h


@@ -0,0 +1,29 @@
apiVersion: traefik.io/v1alpha1
kind: ServersTransport
metadata:
name: kubernetes-dashboard-transport
namespace: kube-system
spec:
serverName: kubernetes-dashboard
insecureSkipVerify: true
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: kubernetes-dashboard-tls
namespace: kube-system
spec:
entryPoints: # [1]
- websecure
routes: # [2]
- kind: Rule
match: Host(`kubernetes-odroid.allarddcs.nl`) # [3]
priority: 10 # [4]
services: # [8]
- kind: Service
name: kubernetes-dashboard
namespace: kube-system
port: 443 # [9]
serversTransport: kubernetes-dashboard-transport
tls: # [11]
certResolver: letsencrypt


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-minio
title: Minio (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

odroid/minio/minio.txt Normal file

@@ -0,0 +1,3 @@
Username: I8VTWUTPITYMGQ8BL22V
Password: Snd6kC8KL73E7FyHaf3vMmgD8iDSHlGdEcrAHIoJ
Note: Copy the credentials to a secure location. MinIO will not display these again.

odroid/minio/minio.yaml Normal file

@@ -0,0 +1,140 @@
apiVersion: apps/v1 # for k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1
kind: Deployment
metadata:
# This name uniquely identifies the Deployment
name: minio
namespace: postgres # Change this value to match the namespace metadata.name
spec:
selector:
matchLabels:
app: minio
strategy:
type: Recreate
template:
metadata:
labels:
# Label is used as selector in the service.
app: minio
spec:
# Refer to the PVC created earlier
volumes:
- name: storage
persistentVolumeClaim:
# Name of the PVC created earlier
claimName: minio-pvc
containers:
- name: minio
# Pulls the default Minio image from Docker Hub
image: minio/minio:latest
command:
- /bin/bash
- -c
args:
- minio server /storage --console-address :9090
env:
# Minio access key and secret key
- name: MINIO_ROOT_USER
value: "admin"
- name: MINIO_ROOT_PASSWORD
value: "Minio01@"
ports:
- containerPort: 9000
hostPort: 9000
- containerPort: 9090
hostPort: 9090
# Mount the volume into the pod
volumeMounts:
- name: storage # must match the volume name, above
mountPath: "/storage"
---
apiVersion: v1
kind: Service
metadata:
name: minio
namespace: postgres
spec:
type: ClusterIP
ports:
- name: minio-console
port: 9090
selector:
app: minio
---
apiVersion: v1
kind: Service
metadata:
name: minio-api
namespace: postgres
spec:
type: ClusterIP
ports:
- name: minio-api
port: 9000
selector:
app: minio
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: minio-allarddcs-tls
namespace: postgres
spec:
entryPoints:
- websecure
routes:
- match: Host(`minio-odroid.allarddcs.nl`)
kind: Rule
services:
- name: minio
port: 9090
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: minio-api-allarddcs-tls
namespace: postgres
spec:
entryPoints:
- websecure
routes:
- match: Host(`minio-odroid-api.allarddcs.nl`)
kind: Rule
services:
- name: minio-api
port: 9000
tls:
certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: minio-pv
spec:
storageClassName: ""
capacity:
storage: 5Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/minio
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: minio-pvc
namespace: postgres
spec:
storageClassName: ""
volumeName: minio-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 5Gi


@@ -0,0 +1,17 @@
INSTALLATION:
=============
#if not already done:
kubectl create ns mongodb-operator
kubectl create ns mongodb
#if not already done:
helm repo add mongodb https://mongodb.github.io/helm-charts
helm repo update
#install the operator:
helm install community-operator mongodb/community-operator --namespace mongodb-operator --set operator.watchNamespace="*"
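#verify the operator (a quick sketch):
helm list -n mongodb-operator
kubectl get pods -n mongodb-operator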


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-mongodb-operator
title: Mongodb-operator (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-nginx
title: Nginx (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,78 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
volumeMounts:
- mountPath: /usr/share/nginx/html
name: nginx
subPath: html
ports:
- containerPort: 80
volumes:
- name: nginx
persistentVolumeClaim:
claimName: nginx-pvc
---
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
name: nginx
spec:
type: NodePort
ports:
- port: 80
nodePort: 30080
name: http
selector:
app: nginx
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nginx-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.40.100
path: /mnt/nfs_share/nginx-odroid
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nginx-pvc
spec:
storageClassName: ""
volumeName: nginx-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi


@@ -0,0 +1,19 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nginx-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.40.100
path: /mnt/nfs_share/nginx-odroid
readOnly: false


@@ -0,0 +1,18 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nginx-pvc
spec:
storageClassName: ""
volumeName: nginx-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi


@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
name: nginx
spec:
type: NodePort
ports:
- port: 80
nodePort: 30080
name: http
selector:
app: nginx


@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: external-nginx
spec:
ports:
- protocol: TCP
port: 30080
targetPort: 30080
---
apiVersion: v1
kind: Endpoints
metadata:
name: external-nginx
subsets:
- addresses:
- ip: 192.168.2.111
ports:
- port: 30080


@@ -0,0 +1,13 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: odroid-http-alldcs
spec:
entryPoints:
- web
routes:
- match: Host(`odroid.alldcs.nl`)
kind: Rule
services:
- name: external-nginx
port: 30080


@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: odroid-tls-alldcs
spec:
entryPoints:
- websecure
routes:
- match: Host(`odroid.alldcs.nl`)
kind: Rule
services:
- name: external-nginx
port: 80
tls:
certResolver: letsencrypt


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-pgadmin
title: Pgadmin (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

odroid/pgadmin/pgadmin.yaml Executable file

@@ -0,0 +1,101 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: pgadmin
namespace: postgres
labels:
app: pgadmin
spec:
replicas: 1
selector:
matchLabels:
app: pgadmin
template:
metadata:
labels:
app: pgadmin
spec:
containers:
- name: pgadmin
image: dpage/pgadmin4
ports:
- containerPort: 80
env:
- name: PGADMIN_DEFAULT_EMAIL
value: admin@alldcs.nl
- name: PGADMIN_DEFAULT_PASSWORD
value: Pgadmin01@
volumeMounts:
- mountPath: /var/lib/pgadmin
name: pgadmin
volumes:
- name: pgadmin
persistentVolumeClaim:
claimName: pgadmin-pvc
---
apiVersion: v1
kind: Service
metadata:
name: pgadmin
namespace: postgres
labels:
name: pgadmin
spec:
type: ClusterIP
ports:
- port: 80
name: http
selector:
app: pgadmin
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: pgadmin-odroid-alldcs
namespace: postgres
spec:
entryPoints:
- websecure
routes:
- match: Host(`pgadmin-odroid.allarddcs.nl`)
kind: Rule
services:
- name: pgadmin
port: 80
tls:
certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pgadmin-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/pgadmin/odroid
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-pvc
namespace: postgres
spec:
storageClassName: ""
volumeName: pgadmin-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-phpmyadmin
title: Phpmyadmin (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

odroid/phpmyadmin/phpmyadmin.yaml Executable file

@@ -0,0 +1,101 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: phpmyadmin
namespace: mariadb
labels:
app: phpmyadmin
spec:
replicas: 1
selector:
matchLabels:
app: phpmyadmin
template:
metadata:
labels:
app: phpmyadmin
spec:
containers:
- name: phpmyadmin
image: arm64v8/phpmyadmin
ports:
- containerPort: 80
env:
- name: PMA_HOST
value: mariadb
- name: PMA_PORT
value: "3306"
- name: MYSQL_ROOT_PASSWORD
value: "zabbix"
# volumeMounts:
# - name: phpconfig
# mountPath: /etc/phpmyadmin
# volumes:
# - name: phpconfig
# persistentVolumeClaim:
# claimName: phpmyadmin-pvc
---
apiVersion: v1
kind: Service
metadata:
name: phpmyadmin
namespace: mariadb
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
selector:
app: phpmyadmin
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: phpmyadmin-tls-alldcs
namespace: mariadb
spec:
entryPoints:
- websecure
routes:
- match: Host(`phpmyadmin-dev.alldcs.nl`)
kind: Rule
services:
- name: phpmyadmin
port: 80
tls:
certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: phpmyadmin-pv
labels:
type: local
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
server: 192.168.40.100
path: /mnt/nfs_share/phpmyadmin
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: phpmyadmin-pvc
namespace: mariadb
spec:
storageClassName: ""
volumeName: phpmyadmin-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1G


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-postgres-operator-percona
title: Postgres-operator-percona (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,6 @@
#deploy operator
kubectl apply --server-side -f \
https://raw.githubusercontent.com/percona/percona-postgresql-operator/main/deploy/bundle.yaml
#deploy cluster
kubectl apply \
-f https://raw.githubusercontent.com/percona/percona-postgresql-operator/main/deploy/cr.yaml
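#verify (a quick sketch): wait for the cluster pods, then read the password from the
#cluster1-pguser-cluster1 secret that the operator creates for cluster "cluster1":
kubectl get pods --watch
kubectl get secret cluster1-pguser-cluster1 -o jsonpath="{.data.password}" | base64 -d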


@@ -0,0 +1,271 @@
apiVersion: pgv2.percona.com/v2
kind: PerconaPGCluster
metadata:
name: cluster1
# finalizers:
# - percona.com/delete-pvc
# - percona.com/delete-ssl
spec:
crVersion: 2.3.0
# secrets:
# customTLSSecret:
# name: cluster1-cert
# customReplicationTLSSecret:
# name: replication1-cert
# standby:
# enabled: true
# host: "<primary-ip>"
# port: "<primary-port>"
# repoName: repo1
# openshift: true
# users:
# - name: rhino
# databases:
# - zoo
# options: "SUPERUSER"
# password:
# type: ASCII
# secretName: "rhino-credentials"
# databaseInitSQL:
# key: init.sql
# name: cluster1-init-sql
# pause: true
# unmanaged: true
# dataSource:
# postgresCluster:
# clusterName: cluster1
# repoName: repo1
# options:
# - --type=time
# - --target="2021-06-09 14:15:11-04"
# pgbackrest:
# stanza: db
# configuration:
# - secret:
# name: pgo-s3-creds
# global:
# repo1-path: /pgbackrest/postgres-operator/hippo/repo1
# repo:
# name: repo1
# s3:
# bucket: "my-bucket"
# endpoint: "s3.ca-central-1.amazonaws.com"
# region: "ca-central-1"
image: perconalab/percona-postgresql-operator:main-ppg15-postgres
imagePullPolicy: Always
postgresVersion: 15
port: 5432
expose:
annotations:
my-annotation: percona-lb
labels:
my-label: percona-lb
type: LoadBalancer
instances:
- name: instance1
replicas: 3
# resources:
# limits:
# cpu: 2.0
# memory: 4Gi
#
# sidecars:
# - name: testcontainer
# image: mycontainer1:latest
# - name: testcontainer2
# image: mycontainer1:latest
#
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: my-node-label
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/instance-set: instance1
#
# tolerations:
# - effect: NoSchedule
# key: role
# operator: Equal
# value: connection-poolers
#
# priorityClassName: high-priority
#
# walVolumeClaimSpec:
# accessModes:
# - "ReadWriteOnce"
# resources:
# requests:
# storage: 1Gi
#
dataVolumeClaimSpec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
proxy:
pgBouncer:
replicas: 3
image: perconalab/percona-postgresql-operator:main-ppg15-pgbouncer
# exposeSuperusers: true
# resources:
# limits:
# cpu: 200m
# memory: 128Mi
#
# expose:
# annotations:
# my-annotation: value1
# labels:
# my-label: value2
# type: LoadBalancer
#
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# podAffinityTerm:
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/cluster: keycloakdb
# postgres-operator.crunchydata.com/role: pgbouncer
# topologyKey: kubernetes.io/hostname
#
# tolerations:
# - effect: NoSchedule
# key: role
# operator: Equal
# value: connection-poolers
#
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: my-node-label
# whenUnsatisfiable: ScheduleAnyway
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/role: pgbouncer
#
# sidecars:
# - name: bouncertestcontainer1
# image: mycontainer1:latest
#
# customTLSSecret:
# name: keycloakdb-pgbouncer.tls
#
# config:
# global:
# pool_mode: transaction
backups:
pgbackrest:
# metadata:
# labels:
image: perconalab/percona-postgresql-operator:main-ppg15-pgbackrest
# configuration:
# - secret:
# name: cluster1-pgbackrest-secrets
# jobs:
# priorityClassName: high-priority
# resources:
# limits:
# cpu: 200m
# memory: 128Mi
# tolerations:
# - effect: NoSchedule
# key: role
# operator: Equal
# value: connection-poolers
#
# global:
# repo1-retention-full: "14"
# repo1-retention-full-type: time
# repo1-path: /pgbackrest/postgres-operator/cluster1/repo1
# repo1-cipher-type: aes-256-cbc
# repo1-s3-uri-style: path
# repo2-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo2
# repo3-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo3
# repo4-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo4
# repoHost:
# priorityClassName: high-priority
#
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: my-node-label
# whenUnsatisfiable: ScheduleAnyway
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/pgbackrest: ""
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# podAffinityTerm:
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/cluster: keycloakdb
# postgres-operator.crunchydata.com/role: pgbouncer
# topologyKey: kubernetes.io/hostname
#
manual:
repoName: repo1
options:
- --type=full
repos:
- name: repo1
schedules:
full: "0 0 * * 6"
# differential: "0 1 * * 1-6"
volume:
volumeClaimSpec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
# - name: repo2
# s3:
# bucket: "<YOUR_AWS_S3_BUCKET_NAME>"
# endpoint: "<YOUR_AWS_S3_ENDPOINT>"
# region: "<YOUR_AWS_S3_REGION>"
# - name: repo3
# gcs:
# bucket: "<YOUR_GCS_BUCKET_NAME>"
# - name: repo4
# azure:
# container: "<YOUR_AZURE_CONTAINER>"
#
# restore:
# enabled: true
# repoName: repo1
# options:
# PITR restore in place
# - --type=time
# - --target="2021-06-09 14:15:11-04"
# restore individual databases
# - --db-include=hippo
pmm:
enabled: true
image: percona/pmm-client:2.37.0
# imagePullPolicy: IfNotPresent
secret: cluster1-pmm-secret
serverHost: monitoring-service
# patroni:
# dynamicConfiguration:
# postgresql:
# parameters:
# max_parallel_workers: 2
# max_worker_processes: 2
# shared_buffers: 1GB
# work_mem: 2MB


@@ -0,0 +1,122 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: percona-pmm-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.40.100
path: /mnt/nfs_share/percona-pmm
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: percona-pmm-pvc
spec:
storageClassName: ""
volumeName: percona-pmm-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: percona-pmm
labels:
app: percona-pmm
spec:
replicas: 1
selector:
matchLabels:
app: percona-pmm
template:
metadata:
labels:
app: percona-pmm
spec:
containers:
- name: percona-pmm
image: percona/pmm-server:2
ports:
- containerPort: 443
env:
- name: DISABLE_UPDATES #Disable automatic updates
value: 'true'
- name: DISABLE_TELEMETRY #Disable built-in telemetry and disable STT if telemetry is disabled
value: 'true'
- name: METRICS_RESOLUTION #High metrics resolution in seconds
value: 10s
- name: METRICS_RESOLUTION_HR #High metrics resolution (same as above)
value: 10s
- name: METRICS_RESOLUTION_MR #Medium metrics resolution in seconds
value: '100s'
- name: METRICS_RESOLUTION_LR #Low metrics resolution in seconds
value: '500s'
- name: DATA_RETENTION #How long to keep time-series data in ClickHouse. This variable accepts golang style duration format, example: 24h, 30m, 10s
value: '24h'
- name: ENABLE_VM_CACHE #Enable cache in VM
value: 'false'
- name: ENABLE_ALERTING #Enable integrated alerting
value: 'false'
- name: ENABLE_AZUREDISCOVER #Enable support for discovery of Azure databases
value: 'false'
- name: ENABLE_BACKUP_MANAGEMENT #Enable integrated backup tools
value: 'true'
- name: ENABLE_DBAAS #Enable DBaaS features
value: 'true'
- name: PMM_PUBLIC_ADDRESS #External IP address or the DNS name on which PMM server is running.
value: ''
- name: PMM_DEBUG #Enables a more verbose log level
value: ''
- name: PMM_TRACE
value: ''
volumeMounts:
- mountPath: /svr
name: percona-pmm
volumes:
- name: percona-pmm
persistentVolumeClaim:
claimName: percona-pmm-pvc
---
apiVersion: v1
kind: Service
metadata:
name: percona-pmm
spec:
ports:
- protocol: TCP
port: 443
targetPort: 443
selector:
app: percona-pmm
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: percona-pmm-tls
spec:
entryPoints:
- websecure
routes:
- match: Host(`percona.alldcs.nl`)
kind: Rule
services:
- name: percona-pmm
port: 443
tls:
certResolver: letsencrypt


@@ -0,0 +1,271 @@
apiVersion: pgv2.percona.com/v2
kind: PerconaPGCluster
metadata:
name: cluster1
# finalizers:
# - percona.com/delete-pvc
# - percona.com/delete-ssl
spec:
crVersion: 2.3.0
# secrets:
# customTLSSecret:
# name: cluster1-cert
# customReplicationTLSSecret:
# name: replication1-cert
# standby:
# enabled: true
# host: "<primary-ip>"
# port: "<primary-port>"
# repoName: repo1
# openshift: true
# users:
# - name: rhino
# databases:
# - zoo
# options: "SUPERUSER"
# password:
# type: ASCII
# secretName: "rhino-credentials"
# databaseInitSQL:
# key: init.sql
# name: cluster1-init-sql
# pause: true
# unmanaged: true
# dataSource:
# postgresCluster:
# clusterName: cluster1
# repoName: repo1
# options:
# - --type=time
# - --target="2021-06-09 14:15:11-04"
# pgbackrest:
# stanza: db
# configuration:
# - secret:
# name: pgo-s3-creds
# global:
# repo1-path: /pgbackrest/postgres-operator/hippo/repo1
# repo:
# name: repo1
# s3:
# bucket: "my-bucket"
# endpoint: "s3.ca-central-1.amazonaws.com"
# region: "ca-central-1"
image: perconalab/percona-postgresql-operator:main-ppg15-postgres
imagePullPolicy: Always
postgresVersion: 15
port: 5432
expose:
annotations:
my-annotation: percona-lb
labels:
my-label: percona-lb
type: LoadBalancer
instances:
- name: instance1
replicas: 3
# resources:
# limits:
# cpu: 2.0
# memory: 4Gi
#
# sidecars:
# - name: testcontainer
# image: mycontainer1:latest
# - name: testcontainer2
# image: mycontainer1:latest
#
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: my-node-label
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/instance-set: instance1
#
# tolerations:
# - effect: NoSchedule
# key: role
# operator: Equal
# value: connection-poolers
#
# priorityClassName: high-priority
#
# walVolumeClaimSpec:
# accessModes:
# - "ReadWriteOnce"
# resources:
# requests:
# storage: 1Gi
#
dataVolumeClaimSpec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
proxy:
pgBouncer:
replicas: 3
image: perconalab/percona-postgresql-operator:main-ppg15-pgbouncer
# exposeSuperusers: true
# resources:
# limits:
# cpu: 200m
# memory: 128Mi
#
# expose:
# annotations:
# my-annotation: value1
# labels:
# my-label: value2
# type: LoadBalancer
#
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# podAffinityTerm:
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/cluster: keycloakdb
# postgres-operator.crunchydata.com/role: pgbouncer
# topologyKey: kubernetes.io/hostname
#
# tolerations:
# - effect: NoSchedule
# key: role
# operator: Equal
# value: connection-poolers
#
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: my-node-label
# whenUnsatisfiable: ScheduleAnyway
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/role: pgbouncer
#
# sidecars:
# - name: bouncertestcontainer1
# image: mycontainer1:latest
#
# customTLSSecret:
# name: keycloakdb-pgbouncer.tls
#
# config:
# global:
# pool_mode: transaction
backups:
pgbackrest:
# metadata:
# labels:
image: perconalab/percona-postgresql-operator:main-ppg15-pgbackrest
# configuration:
# - secret:
# name: cluster1-pgbackrest-secrets
# jobs:
# priorityClassName: high-priority
# resources:
# limits:
# cpu: 200m
# memory: 128Mi
# tolerations:
# - effect: NoSchedule
# key: role
# operator: Equal
# value: connection-poolers
#
# global:
# repo1-retention-full: "14"
# repo1-retention-full-type: time
# repo1-path: /pgbackrest/postgres-operator/cluster1/repo1
# repo1-cipher-type: aes-256-cbc
# repo1-s3-uri-style: path
# repo2-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo2
# repo3-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo3
# repo4-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo4
# repoHost:
# priorityClassName: high-priority
#
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: my-node-label
# whenUnsatisfiable: ScheduleAnyway
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/pgbackrest: ""
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# podAffinityTerm:
# labelSelector:
# matchLabels:
# postgres-operator.crunchydata.com/cluster: keycloakdb
# postgres-operator.crunchydata.com/role: pgbouncer
# topologyKey: kubernetes.io/hostname
#
manual:
repoName: repo1
options:
- --type=full
repos:
- name: repo1
schedules:
full: "0 0 * * 6"
# differential: "0 1 * * 1-6"
volume:
volumeClaimSpec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
# - name: repo2
# s3:
# bucket: "<YOUR_AWS_S3_BUCKET_NAME>"
# endpoint: "<YOUR_AWS_S3_ENDPOINT>"
# region: "<YOUR_AWS_S3_REGION>"
# - name: repo3
# gcs:
# bucket: "<YOUR_GCS_BUCKET_NAME>"
# - name: repo4
# azure:
# container: "<YOUR_AZURE_CONTAINER>"
#
# restore:
# enabled: true
# repoName: repo1
# options:
# PITR restore in place
# - --type=time
# - --target="2021-06-09 14:15:11-04"
# restore individual databases
# - --db-include=hippo
pmm:
enabled: true
image: percona/pmm-client:2.37.0
# imagePullPolicy: IfNotPresent
secret: cluster1-pmm-secret
serverHost: monitoring-service
# patroni:
# dynamicConfiguration:
# postgresql:
# parameters:
# max_parallel_workers: 2
# max_worker_processes: 2
# shared_buffers: 1GB
# work_mem: 2MB


@@ -0,0 +1,2 @@
echo user: $(microk8s kubectl get secret cluster1-pguser-cluster1 -o jsonpath="{.data.user}" | base64 -d)
echo password: $(microk8s kubectl get secret cluster1-pguser-cluster1 -o jsonpath="{.data.password}" | base64 -d)


@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: percona-pmm-tls
spec:
entryPoints:
- websecure
routes:
- match: Host(`percona.alldcs.nl`)
kind: Rule
services:
- name: percona-pmm
port: 443
tls:
certResolver: letsencrypt


@@ -0,0 +1,28 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: percona-route-tcp-tls
spec:
entryPoints:
- websecure
routes:
- match: HostSNI(`percona.alldcs.nl`)
services:
- name: percona-pmm
port: 443
tls:
passthrough: true
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: percona-route-tcp-http
spec:
entryPoints:
- web
routes:
- match: HostSNI(`percona.alldcs.nl`)
services:
- name: percona-pmm
port: 80


@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
name: pmm-secret
labels:
app.kubernetes.io/name: pmm
type: Opaque
data:
# base64 encoded password
# encode some password: `echo -n "admin" | base64`
PMM_ADMIN_PASSWORD: YWRtaW4=


@@ -0,0 +1,258 @@
## @section Percona Monitoring and Management (PMM) parameters
## Default values for PMM.
## This is a YAML-formatted file.
## Declare variables to be passed into your templates.
## PMM image version
## ref: https://hub.docker.com/r/percona/pmm-server/tags
## @param image.repository PMM image repository
## @param image.pullPolicy PMM image pull policy
## @param image.tag PMM image tag (immutable tags are recommended)
## @param image.imagePullSecrets Global Docker registry secret names as an array
##
image:
repository: percona/pmm-server
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "2.39.0"
imagePullSecrets: []
## PMM environment variables
## ref: https://docs.percona.com/percona-monitoring-and-management/setting-up/server/docker.html#environment-variables
##
pmmEnv:
## @param pmmEnv.DISABLE_UPDATES Disables a periodic check for new PMM versions as well as ability to apply upgrades using the UI (need to be disabled in k8s environment as updates rolled with helm/container update)
##
DISABLE_UPDATES: "1"
# ENABLE_DBAAS: "1"
# optional variables to integrate Grafana with internal iDP, see also secret part
# GF_AUTH_GENERIC_OAUTH_ENABLED: 'true'
# GF_AUTH_GENERIC_OAUTH_SCOPES: ''
# GF_AUTH_GENERIC_OAUTH_AUTH_URL: ''
# GF_AUTH_GENERIC_OAUTH_TOKEN_URL: ''
# GF_AUTH_GENERIC_OAUTH_API_URL: ''
# GF_AUTH_GENERIC_OAUTH_ALLOWED_DOMAINS: ''
## @param pmmResources optional [Resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) requested for [PMM container](https://docs.percona.com/percona-monitoring-and-management/setting-up/server/index.html#set-up-pmm-server)
## pmmResources:
## requests:
## memory: "32Gi"
## cpu: "8"
## limits:
## memory: "64Gi"
## cpu: "32"
pmmResources: {}
## Readiness probe Config
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes
## @param readyProbeConf.initialDelaySeconds Number of seconds after the container has started before readiness probes is initiated
## @param readyProbeConf.periodSeconds How often (in seconds) to perform the probe
## @param readyProbeConf.failureThreshold When a probe fails, Kubernetes will try failureThreshold times before giving up
##
readyProbeConf:
initialDelaySeconds: 1
periodSeconds: 5
failureThreshold: 6
## @section PMM secrets
##
secret:
## @param secret.name Defines the name of the k8s secret that holds passwords and other secrets
##
name: pmm-secret
## @param secret.create If true then secret will be generated by Helm chart. Otherwise it is expected to be created by user.
##
create: true
## @param secret.pmm_password Initial PMM password - it changes only on the first deployment, ignored if PMM was already provisioned and just restarted. If PMM admin password is not set, it will be generated.
## E.g.
## pmm_password: admin
##
## To get password execute `kubectl get secret pmm-secret -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode`
##
pmm_password: ""
##
# GF_AUTH_GENERIC_OAUTH_CLIENT_ID optional client ID to integrate Grafana with internal iDP, requires other env defined as well under pmmEnv
# GF_AUTH_GENERIC_OAUTH_CLIENT_ID:
# GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET optional secret to integrate Grafana with internal iDP, requires other env defined as well under pmmEnv
# GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET:
## @param certs Optional certificates, if not provided PMM would use generated self-signed certificates,
## please provide your own signed ssl certificates like this:
## certs:
## name: pmm-certs
## files:
## certificate.crt:
## certificate.key:
## ca-certs.pem:
## dhparam.pem:
certs: {}
## @section PMM network configuration
## Service configuration
##
service:
## @param service.name Service name that is dns name monitoring services would send data to. `monitoring-service` used by default by pmm-client in Percona operators.
##
name: percona-pmm
## @param service.type Kubernetes Service type
##
type: NodePort
## Ports 443 and/or 80
##
ports:
## @param service.ports[0].port https port number
- port: 443
## @param service.ports[0].targetPort target port to map for statefulset and ingress
targetPort: https
## @param service.ports[0].protocol protocol for https
protocol: TCP
## @param service.ports[0].name port name
name: https
## @param service.ports[1].port http port number
- port: 80
## @param service.ports[1].targetPort target port to map for statefulset and ingress
targetPort: http
## @param service.ports[1].protocol protocol for http
protocol: TCP
## @param service.ports[1].name port name
name: http
## Ingress controller configuration
##
ingress:
## @param ingress.enabled -- Enable ingress controller resource
enabled: false
## @param ingress.nginxInc -- Using ingress controller from NGINX Inc
nginxInc: false
## @param ingress.annotations -- Ingress annotations configuration
annotations: {}
## kubernetes.io/ingress.class: nginx
## kubernetes.io/tls-acme: "true"
### nginx proxy to https
## nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
## @param ingress.community.annotations -- Ingress annotations configuration for community managed ingress (nginxInc = false)
community:
annotations: {}
## kubernetes.io/ingress.class: nginx
## kubernetes.io/tls-acme: "true"
## @param ingress.ingressClassName -- Sets the ingress controller class name to use.
ingressClassName: ""
## Ingress resource hostnames and path mappings
hosts:
## @param ingress.hosts[0].host hostname
- host: chart-example.local
## @param ingress.hosts[0].paths path mapping
paths: []
## @param ingress.pathType -- How ingress paths should be treated.
pathType: Prefix
## @param ingress.tls -- Ingress TLS configuration
tls: []
## - secretName: chart-example-tls
## hosts:
## - chart-example.local
## @section PMM storage configuration
## Claiming storage for PMM using Persistent Volume Claims (PVC)
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
##
storage:
## @param storage.name name of PVC
name: pmm-storage
## @param storage.storageClassName optional PMM data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClassName: ""
##
## @param storage.size size of storage [depends](https://docs.percona.com/percona-monitoring-and-management/setting-up/server/index.html#set-up-pmm-server) on number of monitored services and data retention
##
size: 10Gi
##
## @param storage.dataSource VolumeSnapshot to start from
##
dataSource: {}
## name: before-vX.Y.Z-upgrade
## kind: VolumeSnapshot
## apiGroup: snapshot.storage.k8s.io
##
## @param storage.selector select existing PersistentVolume
##
selector: {}
## matchLabels:
## release: "stable"
## matchExpressions:
## - key: environment
## operator: In
## values:
## - dev
## @section PMM kubernetes configurations
## @param nameOverride String to partially override common.names.fullname template with a string (will prepend the release name)
##
nameOverride: ""
## @param extraLabels Labels to add to all deployed objects
##
extraLabels: {}
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
## @param serviceAccount.create Specifies whether a ServiceAccount should be created
## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
## @param serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template.
##
serviceAccount:
create: true
annotations: {}
name: "pmm-service-account"
## @param podAnnotations Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podSecurityContext Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## E.g
## podSecurityContext:
## fsGroup: 2000
##
podSecurityContext: {}
## @param securityContext Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## securityContext.capabilities The capabilities to add/drop when running containers
## securityContext.runAsUser Set pmm containers' Security Context runAsUser
## securityContext.runAsNonRoot Set pmm container's Security Context runAsNonRoot
## E.g.
## securityContext:
## capabilities:
## drop:
## - ALL
## readOnlyRootFilesystem: true
## runAsNonRoot: true
## runAsUser: 1000
securityContext: {}
## @param nodeSelector Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}


@@ -0,0 +1,5 @@
# https://github.com/golangci/golangci/wiki/Configuration
service:
prepare:
- make deps


@@ -0,0 +1,22 @@
Installation:
============
helm repo add postgres-operator-charts https://opensource.zalando.com/postgres-operator/charts/postgres-operator
helm install postgres-operator postgres-operator-charts/postgres-operator
helm repo add postgres-operator-ui-charts https://opensource.zalando.com/postgres-operator/charts/postgres-operator-ui
helm install postgres-operator-ui postgres-operator-ui-charts/postgres-operator-ui -f postgres-operator-values.yaml
Password:
---------
echo "Password: $(kubectl -n default get secret \
postgres.postgres-cluster-1.credentials.postgresql.acid.zalan.do -o \
jsonpath="{.data.password}" | base64 -d)"
External access:
----------------
Expose it via a LoadBalancer!
Note: when installing the UI, include postgres-operator-values.yaml because of the URL!
e.g. appUrl: "http://pgzalando.alldcs.online:8081"
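Verify (a quick sketch):
------------------------
kubectl get postgresql
kubectl get pods -w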


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-postgres-operator-zalando
title: Postgres-operator-zalando (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,2 @@
echo User: $(microk8s kubectl get secret postgres.postgres-cluster-2.credentials.postgresql.acid.zalan.do -o jsonpath="{.data.username}" | base64 -d)
echo Password: $(microk8s kubectl get secret postgres.postgres-cluster-2.credentials.postgresql.acid.zalan.do -o jsonpath="{.data.password}" | base64 -d)


@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: postgres-operator-tls-alldcs
spec:
entryPoints:
- websecure
routes:
- match: Host(`pgzalando.alldcs.nl`)
kind: Rule
services:
- name: postgres-operator-ui
port: 8081
tls:
certResolver: letsencrypt


@@ -0,0 +1,2 @@
microk8s helm install postgres-operator postgres-operator-charts/postgres-operator
microk8s helm install postgres-operator-ui postgres-operator-ui-charts/postgres-operator-ui -f postgres-operator-ui-values.yaml


@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: postgres-cluster-2-lb
namespace: default
spec:
ports:
- port: 5432
protocol: TCP
targetPort: 5432
selector:
# app: postgres-cluster-1-0
statefulset.kubernetes.io/pod-name: postgres-cluster-2-0
type: LoadBalancer


@@ -0,0 +1,30 @@
kind: "postgresql"
apiVersion: "acid.zalan.do/v1"
metadata:
name: "postgres-cluster"
namespace: "default"
labels:
team: acid
spec:
teamId: "acid"
postgresql:
version: "15"
numberOfInstances: 3
enableMasterLoadBalancer: true
enableLogicalBackup: true
volume:
size: "2Gi"
allowedSourceRanges:
# IP ranges to access your cluster go here
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 500m
memory: 500Mi


@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: postgres-operator-ui-lb
spec:
ports:
- name: http
targetPort: 8081
port: 8081
selector:
app: postgres-operator-ui
type: LoadBalancer


@@ -0,0 +1,25 @@
apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: postgres-operator-ui
meta.helm.sh/release-namespace: default
labels:
app.kubernetes.io/instance: postgres-operator-ui
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: postgres-operator-ui
helm.sh/chart: postgres-operator-ui-1.10.0
name: postgres-operator-ui
namespace: default
spec:
ports:
- port: 8081
protocol: TCP
targetPort: 8081
selector:
app.kubernetes.io/instance: postgres-operator-ui
app.kubernetes.io/name: postgres-operator-ui
sessionAffinity: None
type: NodePort
status:
loadBalancer: {}


@@ -0,0 +1,110 @@
# Default values for postgres-operator-ui.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
# configure ui image
image:
registry: registry.opensource.zalan.do
repository: acid/postgres-operator-ui
tag: v1.9.0
pullPolicy: "IfNotPresent"
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
# - name:
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# configure UI pod resources
resources:
limits:
cpu: 200m
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
# configure UI ENVs
envs:
# IMPORTANT: While operator chart and UI chart are independent, this is the interface between
# UI and operator API. Insert the service name of the operator API here!
appUrl: "http://pgzalando.alldcs.online:8081"
operatorApiUrl: "http://postgres-operator:8080"
operatorClusterNameLabel: "cluster-name"
resourcesVisible: "False"
targetNamespace: "default"
teams:
- "acid"
# Extra pod annotations
podAnnotations:
{}
# configure extra UI ENVs
# Extra ENVs are written in Kubernetes format and added "as is" to the pod's env variables
# https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
# UI specific env variables can be found here: https://github.com/zalando/postgres-operator/blob/master/ui/operator_ui/main.py
extraEnvs:
[]
# Example of settings to make the snapshot view work in the UI when using AWS
# - name: WALE_S3_ENDPOINT
# value: https+path://s3.us-east-1.amazonaws.com:443
# - name: SPILO_S3_BACKUP_PREFIX
# value: spilo/
# - name: AWS_ACCESS_KEY_ID
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_ACCESS_KEY_ID
# - name: AWS_SECRET_ACCESS_KEY
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_SECRET_ACCESS_KEY
# - name: AWS_DEFAULT_REGION
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_DEFAULT_REGION
# - name: SPILO_S3_BACKUP_BUCKET
# value: <s3 bucket used by the operator>
# configure UI service
service:
type: "NodePort"
port: "8081"
# If the type of the service is NodePort a port can be specified using the nodePort field
# If the nodePort field is not specified, or if it has no value, then a random port is used
# nodePort: 32521
annotations:
{}
# configure UI ingress. If needed: "enabled: true"
ingress:
enabled: false
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
ingressClassName: ""
hosts:
- host: ui.example.org
paths: [""]
tls: []
# - secretName: ui-tls
# hosts:
# - ui.example.org


@@ -0,0 +1,33 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgres-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.50.101
path: /nfs_share/postgres-operator-zalando
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-pvc
spec:
storageClassName: ""
volumeName: postgres-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 2Gi


@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: external-postgres-operator-ui
spec:
ports:
- protocol: TCP
port: 80
targetPort: 8081
---
apiVersion: v1
kind: Endpoints
metadata:
name: external-postgres-operator-ui
subsets:
- addresses:
- ip: 192.168.80.81
ports:
- port: 8081


@@ -0,0 +1,13 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: postgres-operator-ui-http-alldcs
spec:
entryPoints:
- web
routes:
- match: Host(`postgres-operator.alldcs.nl`)
kind: Rule
services:
- name: external-postgres-operator-ui
port: 80


@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: odroid-tls-alldcs
spec:
entryPoints:
- websecure
routes:
- match: Host(`odroid.alldcs.nl`)
kind: Rule
services:
- name: external-nginx
port: 80
tls:
certResolver: letsencrypt


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-redis
title: Redis (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

View File

@@ -0,0 +1,21 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: redis-alldcs-tls
namespace: redis
spec:
entryPoints:
- websecure
routes:
- match: Host(`redis-odroid.alldcs.nl`)
kind: Rule
services:
- name: redis-insight
port: 5540
tls:
certResolver: letsencrypt
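The route points at the redis-insight Service on port 5540; both can be checked with the fully qualified CRD name matching the apiVersion above:
kubectl -n redis get ingressroutes.traefik.containo.us redis-alldcs-tls
kubectl -n redis get svc redis-insight    # port must match the 5540 referenced above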

View File

@@ -0,0 +1,39 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-insight
namespace: redis
labels:
app: redis-insight
spec:
replicas: 1
selector:
matchLabels:
app: redis-insight
template:
metadata:
labels:
app: redis-insight
spec:
containers:
- name: redis-insight
image: redis/redisinsight
ports:
- containerPort: 5540
---
apiVersion: v1
kind: Service
metadata:
name: redis-insight
namespace: redis
labels:
name: redis-insight
spec:
type: ClusterIP
ports:
- port: 5540
name: http
selector:
app: redis-insight
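RedisInsight is only exposed as a ClusterIP Service here (external access goes through the IngressRoute above); for a quick local look it can also be reached with a port-forward:
kubectl -n redis port-forward svc/redis-insight 5540:5540
# then browse to http://localhost:5540 and add the database at redis.redis.svc.cluster.local:6379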

210
odroid/redis/redis.yaml Executable file
View File

@@ -0,0 +1,210 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: redis-configuration
namespace: redis
labels:
app: redis
data:
master.conf: |
maxmemory 400mb
maxmemory-policy allkeys-lru
maxclients 20000
timeout 300
appendonly no
dbfilename dump.rdb
dir /data
slave.conf: |
replicaof redis-0.redis.redis.svc.cluster.local 6379
maxmemory 400mb
maxmemory-policy allkeys-lru
maxclients 20000
timeout 300
dir /data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: redis
namespace: redis
spec:
serviceName: "redis"
replicas: 3
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
initContainers:
- name: init-redis
image: redis:latest
command:
- bash
- "-c"
- |
set -ex
# Generate redis server-id from pod ordinal index.
[[ `hostname` =~ -([0-9]+)$ ]] || exit 1
ordinal=${BASH_REMATCH[1]}
# Copy appropriate redis config files from config-map to respective directories.
if [[ $ordinal -eq 0 ]]; then
cp /mnt/master.conf /etc/redis-config.conf
else
cp /mnt/slave.conf /etc/redis-config.conf
fi
volumeMounts:
- name: redis-pvc
mountPath: /etc
subPath: redis-claim
- name: config-map
mountPath: /mnt/
containers:
- name: redis
image: redis:latest
ports:
- containerPort: 6379
name: redis
command:
- redis-server
- "/etc/redis-config.conf"
volumeMounts:
- name: redis-pvc
mountPath: /data
subPath: redis-data
- name: redis-pvc
mountPath: /etc
subPath: redis-claim
volumes:
- name: config-map
configMap:
name: redis-configuration
volumeClaimTemplates:
- metadata:
name: redis-pvc
spec:
storageClassName: ""
accessModes: [ "ReadWriteMany" ]
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: redis
labels:
app: redis
spec:
ports:
- port: 6379
targetPort: redis
clusterIP: None
selector:
app: redis
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: redis-pv-0
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/redis-0
readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: redis-pv-1
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/redis-1
readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: redis-pv-2
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/redis-2
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-pvc-redis-0
namespace: redis
spec:
storageClassName: ""
volumeName: redis-pv-0
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-pvc-redis-1
namespace: redis
spec:
storageClassName: ""
volumeName: redis-pv-1
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-pvc-redis-2
namespace: redis
spec:
storageClassName: ""
volumeName: redis-pv-2
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
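The init container copies master.conf onto ordinal 0 and slave.conf onto the other ordinals, so redis-1 and redis-2 should come up as replicas of redis-0. A quick replication check (redis-cli is included in the redis image):
kubectl -n redis exec redis-0 -- redis-cli info replication    # expect role:master, connected_slaves:2
kubectl -n redis exec redis-1 -- redis-cli info replication    # expect role:slave, master_link_status:up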

36
odroid/traefik/README.md Executable file
View File

@@ -0,0 +1,36 @@
1) Install Traefik via the Helm chart:
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
kubectl create namespace traefik
2) Create the persistent storage:
kubectl apply -f traefik-pvc
When enabling persistence for certificates, permissions on acme.json can be
lost when Traefik restarts. You can ensure correct permissions with an
initContainer. See https://github.com/traefik/traefik-helm-chart/blob/master/EXAMPLES.md#use-traefik-native-lets-encrypt-integration-without-cert-manager
3) Install:
helm install traefik traefik/traefik -f values.yaml -n traefik
CHECK THAT PORT FORWARDING OF PORTS 80 AND 443 ON THE ROUTER TO THE LOAD BALANCER IS CONFIGURED CORRECTLY!
RESTART THE KPN ROUTER AFTER CHANGING THIS!
4) Harden TLS (tlsoption.yaml comes from the whoami example):
kubectl apply -f tlsoption.yaml
5) Make the dashboard accessible (ingressroute-dashboard.yaml comes from Traefik's own Helm documentation):
kubectl apply -f ingressroute-dashboard.yaml -n traefik
#Migration:
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/content/reference/dynamic-configuration/kubernetes-crd-rbac.yml
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/content/reference/dynamic-configuration/kubernetes-crd-definition-v1.yml
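After the install it is worth confirming that the pod is healthy and finding the LoadBalancer address the router must forward ports 80/443 to; a minimal sketch, assuming the release and namespace are both called traefik as above:
kubectl -n traefik get pods -o wide
kubectl -n traefik get svc traefik                      # EXTERNAL-IP is the load balancer address
kubectl -n traefik logs deploy/traefik | grep -i -E 'acme|error'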

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: odroid-traefik
title: Traefik (odroid)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

View File

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: traefik
rules:
- apiGroups: ["traefik.io"]
resources: ["ingressroutes", "ingressroutesstatus"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["services", "endpoints", "pods", "secrets"]
verbs: ["get", "list", "watch"]

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: traefik-ingressroute
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik
subjects:
- kind: ServiceAccount
name: traefik
namespace: traefik
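Whether this ClusterRole/ClusterRoleBinding pair grants the Traefik service account what it needs can be checked with kubectl auth can-i (the service account traefik in namespace traefik is taken from the binding above):
kubectl auth can-i list ingressroutes.traefik.io --as=system:serviceaccount:traefik:traefik
kubectl auth can-i watch services --as=system:serviceaccount:traefik:traefik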

View File

@@ -0,0 +1,14 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
namespace: traefik
spec:
entryPoints:
- websecure
routes:
- match: Host(`traefik-odroid.allarddcs.nl`) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`))
kind: Rule
services:
- name: api@internal
kind: TraefikService

8
odroid/traefik/tlsoption.yaml Executable file
View File

@@ -0,0 +1,8 @@
apiVersion: traefik.io/v1alpha1
kind: TLSOption
metadata:
name: tlsoption
namespace: traefik
spec:
minVersion: VersionTLS12

View File

@@ -0,0 +1,34 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: traefik-pv
spec:
storageClassName: ""
capacity:
storage: 128Mi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/traefik/odroid
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: traefik-pvc
namespace: traefik
spec:
storageClassName: ""
volumeName: traefik-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 128Mi
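Note that values.yaml below has persistence.enabled set to false, so this claim is only consumed once persistence is switched on; until then it simply stays Bound and unused. To check the binding:
kubectl get pv traefik-pv
kubectl -n traefik get pvc traefik-pvc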

1130
odroid/traefik/values.org Normal file

File diff suppressed because it is too large Load Diff

229
odroid/traefik/values.yaml Normal file
View File

@@ -0,0 +1,229 @@
additionalArguments: []
additionalVolumeMounts: []
affinity: {}
autoscaling:
enabled: false
certificatesResolvers:
letsencrypt:
acme:
email: admin@allarddcs.nl
storage: /data/acme.json
httpChallenge:
entryPoint: web
commonLabels: {}
core:
defaultRuleSyntax: v2
deployment:
additionalContainers: []
additionalVolumes: []
annotations: {}
dnsConfig: {}
enabled: true
imagePullSecrets: []
initContainers:
- name: volume-permissions
image: busybox:latest
command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"]
volumeMounts:
- mountPath: /data
name: data
kind: Deployment
labels: {}
lifecycle: {}
minReadySeconds: 0
podAnnotations: {}
podLabels: {}
replicas: 1
runtimeClassName: null
shareProcessNamespace: false
terminationGracePeriodSeconds: 60
env: []
envFrom: []
experimental:
kubernetesGateway:
enabled: false
plugins: {}
extraObjects: []
globalArguments:
- --global.checknewversion
- --global.sendanonymoususage
hostNetwork: false
image:
pullPolicy: Always
registry: docker.io
repository: traefik
tag: ""
ingressClass:
enabled: true
isDefaultClass: true
ingressRoute:
dashboard:
annotations: {}
enabled: true
entryPoints:
- traefik
labels: {}
matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`)
middlewares: []
tls: {}
healthcheck:
annotations: {}
enabled: true
entryPoints:
- traefik
labels: {}
matchRule: PathPrefix(`/ping`)
middlewares: []
tls: {}
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
logs:
access:
enabled: false
fields:
general:
defaultmode: keep
names: {}
headers:
defaultmode: drop
names: {}
filters: {}
general:
level: ERROR
#metrics:
# prometheus:
# entryPoint: metrics
nodeSelector: {}
persistence:
enabled: false
existingClaim: traefik-pvc
path: /data
podDisruptionBudget:
enabled: false
podSecurityContext:
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
podSecurityPolicy:
enabled: false
ports:
# metrics:
# exposedPort: 9100
# port: 9100
# protocol: TCP
traefik:
expose:
default: true
exposedPort: 9000
port: 9000
protocol: TCP
web:
expose:
default: true
exposedPort: 80
port: 8000
protocol: TCP
allowACMEByPass: true
websecure:
respondingTimeouts:
readTimeout: 600
expose:
default: true
exposedPort: 443
http3:
enabled: false
middlewares: []
port: 8443
protocol: TCP
allowACMEByPass: true
tls:
certResolver: ""
domains: []
enabled: true
options: ""
transport:
respondingTimeouts:
readTimeout: 0 # @schema type:[string, integer, null]
writeTimeout: 0 # @schema type:[string, integer, null]
idleTimeout: 0 # @schema type:[string, integer, null]
lifeCycle:
requestAcceptGraceTimeout: # @schema type:[string, integer, null]
graceTimeOut: # @schema type:[string, integer, null]
keepAliveMaxRequests: # @schema type:[integer, null]; minimum:0
keepAliveMaxTime: # @schema type:[string, integer, null]
priorityClassName: ""
providers:
file:
content: ""
enabled: false
watch: true
kubernetesCRD:
allowCrossNamespace: false
allowEmptyServices: false
allowExternalNameServices: false
enabled: true
namespaces: []
kubernetesIngress:
allowEmptyServices: false
allowExternalNameServices: false
disableIngressClassLookup: false
enabled: true
namespaces: []
publishedService:
enabled: false
rbac:
enabled: true
namespaced: false
secretResourceNames: []
readinessProbe:
failureThreshold: 1
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
resources:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
service:
additionalServices: {}
annotations: {}
annotationsTCP: {}
annotationsUDP: {}
enabled: true
externalIPs: []
labels: {}
loadBalancerSourceRanges: []
single: true
spec: {}
type: LoadBalancer
serviceAccount:
name: ""
serviceAccountAnnotations: {}
startupProbe: null
tlsOptions: {}
tlsStore: {}
tolerations: []
topologySpreadConstraints: []
tracing: {}
updateStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
volumes: []
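Before rolling changed values out, the rendered manifests can be inspected without touching the cluster; a small sketch using the chart repo and namespace from the README above:
helm template traefik traefik/traefik -f values.yaml -n traefik > rendered.yaml
helm upgrade --install traefik traefik/traefik -f values.yaml -n traefik --dry-run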