initial commit

allard
2025-11-23 18:58:51 +01:00
commit 376a944abc
1553 changed files with 314731 additions and 0 deletions

116
lp/backstage/backstage.yaml Normal file
View File

@@ -0,0 +1,116 @@
# kubernetes/backstage.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: backstage
namespace: backstage
spec:
replicas: 1
selector:
matchLabels:
app: backstage
template:
metadata:
labels:
app: backstage
spec:
containers:
- name: backstage
image: roadiehq/community-backstage-image
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 7000
env:
- name: POSTGRES_USER
value: backstage
- name: POSTGRES_PASSWORD
value: backstage
- name: POSTGRES_DB
value: backstage
- name: POSTGRES_SERVICE_HOST
value: postgres13.postgres
- name: POSTGRES_SERVICE_PORT
value: "5432"
- name: GITEA_TOKEN
value:
- name: APP_CONFIG_auth_environment
value: development
- name: APP_CONFIG_provider_github_development_clientID
value: "Ov23linGjyUcqg1CpmlF"
- name: APP_CONFIG_provider_github_development_clientSecret
value: "3cc49adb5168df96662e0ad00db74ed1913849ce"
volumeMounts:
- mountPath: /usr/src/app/app-config.yaml
subPath: app-config.yaml
name: app-config
volumes:
- name: app-config
configMap:
name: app-config
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: backstage-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/backstage
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: backstage-pvc
namespace: backstage
spec:
storageClassName: ""
volumeName: backstage-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: backstage
namespace: backstage
spec:
type: NodePort
selector:
app: backstage
ports:
- name: http
port: 80
targetPort: 7000
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: backstage-tls
namespace: backstage
spec:
entryPoints:
- websecure
routes:
- match: Host(`backstage-lp.allarddcs.nl`)
kind: Rule
services:
- name: backstage
port: 80
tls:
secretName: backstage-lp.allarddcs.nl-tls

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-backstage
title: Backstage (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

16
lp/backstage/certificate.yaml Executable file
View File

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: backstage-lp.allarddcs.nl-tls
namespace: backstage
spec:
dnsNames:
- backstage-lp.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: backstage-lp.allarddcs.nl-tls
usages:
- digital signature
- key encipherment

119
lp/backstage/configmap.dev Normal file
View File

@@ -0,0 +1,119 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: app-config
namespace: backstage
data:
app-config.yaml: |
app:
title: Backstage-Dev
baseUrl: https://backstage-dev.allarddcs.nl # Frontend URL
backend:
baseUrl: https://backstage-dev.allarddcs.nl # Backend URL (API)
listen:
port: 7007
http:
auth:
environment: development
providers:
guest:
development:
allowEveryone: true
cors:
origin: https://backstage-dev.allarddcs.nl
methods: [GET, POST, PUT, DELETE, PATCH]
credentials: true
trustProxy: true
database:
client: pg
connection:
host: ${POSTGRES_SERVICE_HOST}
port: ${POSTGRES_SERVICE_PORT}
user: ${POSTGRES_USER}
password: ${POSTGRES_PASSWORD}
database: ${POSTGRES_DB}
integrations:
gitea:
- host: gitea-dev.allarddcs.nl
apiBaseUrl: https://gitea-dev.allarddcs.nl/api/v1
token: "7c289d89b02489984fc9850411bb26f6ee4e9d37"
logging:
logLevel: info
loggers:
catalog:
level: debug
backend:
level: debug
csp:
connect-src: ["'self'", 'http:', 'https:']
techdocs:
builder: local
generator:
runIn: local
publisher:
type: local
organization:
name: AllardDCS
rollbar:
organization: my-company
# NOTE: The rollbar-backend & accountToken key may be deprecated in the future (replaced by a proxy config)
accountToken: my-rollbar-account-token
auth:
environment: development
providers:
guest:
development:
allowEveryone: true # Guest auth enabled
permission:
enabled: false # Disable permission enforcement
catalog:
providers:
gitea:
yourProviderId:
organization: 'allarddcs' # string
host: gitea-dev.allarddcs.nl
branch: 'master' # Optional, defaults to 'main'
catalogPath: 'catalog-info.yaml' # Optional, defaults to catalog-info.yaml
schedule:
# supports cron, ISO duration, "human duration" as used in code
frequency: { minutes: 30 }
# supports ISO duration, "human duration" as used in code
timeout: { minutes: 3 }
locations:
- type: url
target: https://gitea-dev.allarddcs.nl/allard/kubernetes/raw/branch/master/group.yaml
rules:
- allow: [Group]
- type: url
target: https://gitea-dev.allarddcs.nl/allard/kubernetes/raw/branch/master/system.yaml
rules:
- allow: [System]
- type: url
target: https://gitea-dev.allarddcs.nl/allard/kubernetes/raw/branch/master/catalog-info.yaml
rules:
- allow: [Component]
processors:
gitea:
- host: gitea-dev.allarddcs.nl
apiBaseUrl: https://gitea-dev.allarddcs.nl/api/v1
kubernetes:
serviceLocatorMethod:
type: multiTenant
clusterLocatorMethods:
- type: config
clusters:
- name: local-cluster
url: https://kubernetes.default.svc
authProvider: serviceAccount

386
lp/backstage/configmap.yaml Normal file
View File

@@ -0,0 +1,386 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: app-config
namespace: backstage
data:
app-config.yaml: |
app:
title: Backstage Voorbeeld App
baseUrl: https://backstage-lp.allarddcs.nl
googleAnalyticsTrackingId: # UA-000000-0
#datadogRum:
# clientToken: '123456789'
# applicationId: qwerty
# site: # datadoghq.eu default = datadoghq.com
# env: # optional
support:
url: https://github.com/backstage/backstage/issues # Used by common ErrorPage
items: # Used by common SupportButton component
- title: Issues
icon: github
links:
- url: https://github.com/backstage/backstage/issues
title: GitHub Issues
- title: Discord Chatroom
icon: chat
links:
- url: https://discord.gg/MUpMjP2
title: '#backstage'
backend:
baseUrl: https://backstage-lp.allarddcs.nl
listen:
port: 7000
database:
client: pg
connection:
host: postgres13.postgres
port: 5432
user: backstage
password: backstage
cache:
store: memory
cors:
origin: http://localhost:3000
methods: [GET, POST, PUT, DELETE]
credentials: true
csp:
connect-src: ["'self'", 'http:', 'https:']
# Content-Security-Policy directives follow the Helmet format: https://helmetjs.github.io/#reference
# Default Helmet Content-Security-Policy values can be removed by setting the key to false
reading:
allow:
- host: example.com
- host: '*.mozilla.org'
# workingDirectory: /tmp # Use this to configure a working directory for the scaffolder, defaults to the OS temp-dir
# See README.md in the proxy-backend plugin for information on the configuration format
proxy:
'/circleci/api':
target: https://circleci.com/api/v1.1
headers:
Circle-Token: ${CIRCLECI_AUTH_TOKEN}
'/jenkins/api':
target: http://localhost:8080
headers:
Authorization: ${JENKINS_BASIC_AUTH_HEADER}
'/travisci/api':
target: https://api.travis-ci.com
changeOrigin: true
headers:
Authorization: ${TRAVISCI_AUTH_TOKEN}
travis-api-version: '3'
'/newrelic/apm/api':
target: https://api.newrelic.com/v2
headers:
X-Api-Key: ${NEW_RELIC_REST_API_KEY}
'/pagerduty':
target: https://api.pagerduty.com
headers:
Authorization: Token token=${PAGERDUTY_TOKEN}
'/buildkite/api':
target: https://api.buildkite.com/v2/
headers:
Authorization: ${BUILDKITE_TOKEN}
'/sentry/api':
target: https://sentry.io/api/
allowedMethods: ['GET']
headers:
Authorization: ${SENTRY_TOKEN}
'/ilert':
target: https://api.ilert.com
allowedMethods: ['GET', 'POST', 'PUT']
allowedHeaders: ['Authorization']
headers:
Authorization: ${ILERT_AUTH_HEADER}
organization:
name: AllardDCS
# Reference documentation http://backstage.io/docs/features/techdocs/configuration
# Note: After experimenting with basic setup, use CI/CD to generate docs
# and an external cloud storage when deploying TechDocs for production use-case.
# https://backstage.io/docs/features/techdocs/how-to-guides#how-to-migrate-from-techdocs-basic-to-recommended-deployment-approach
techdocs:
builder: 'local' # Alternatives - 'external'
generator:
runIn: 'docker'
# dockerImage: my-org/techdocs # use a custom docker image
# pullImage: true # or false to disable automatic pulling of image (e.g. if custom docker login is required)
publisher:
type: 'local' # Alternatives - 'googleGcs' or 'awsS3' or 'azureBlobStorage' or 'openStackSwift'. Read documentation for using alternatives.
sentry:
organization: my-company
rollbar:
organization: my-company
# NOTE: The rollbar-backend & accountToken key may be deprecated in the future (replaced by a proxy config)
accountToken: my-rollbar-account-token
lighthouse:
baseUrl: http://localhost:3003
kubernetes:
serviceLocatorMethod:
type: 'multiTenant'
clusterLocatorMethods:
- type: 'config'
clusters: []
kafka:
clientId: backstage
clusters:
- name: cluster
brokers:
- localhost:9092
integrations:
github:
- host: github.com
token:
$env: GITHUB_TOKEN
gitea:
- host: gitea-dev.allarddcs.nl
apiBaseUrl: https://gitea-dev.allarddcs.nl/api/v1
token: "7c289d89b02489984fc9850411bb26f6ee4e9d37"
gitlab:
- host: gitlab.com
token: ${GITLAB_TOKEN}
bitbucket:
- host: bitbucket.org
username: ${BITBUCKET_USERNAME}
appPassword: ${BITBUCKET_APP_PASSWORD}
azure:
- host: dev.azure.com
token: ${AZURE_TOKEN}
# googleGcs:
# clientEmail: 'example@example.com'
# privateKey: ${GCS_PRIVATE_KEY}
catalog:
rules:
- allow:
- Component
- API
- Resource
- Group
- User
- Template
- System
- Domain
- Location
processors:
githubOrg:
providers:
- target: https://github.com
token:
$env: GITHUB_TOKEN
gitea:
- host: gitea-dev.allarddcs.nl
apiBaseUrl: https://gitea-dev.allarddcs.nl/api/v1
#### Example for how to add your GitHub Enterprise instance using the API:
# - target: https://ghe.example.net
# apiBaseUrl: https://ghe.example.net/api
# token: ${GHE_TOKEN}
ldapOrg:
### Example for how to add your enterprise LDAP server
# providers:
# - target: ldaps://ds.example.net
# bind:
# dn: uid=ldap-reader-user,ou=people,ou=example,dc=example,dc=net
# secret: ${LDAP_SECRET}
# users:
# dn: ou=people,ou=example,dc=example,dc=net
# options:
# filter: (uid=*)
# map:
# description: l
# groups:
# dn: ou=access,ou=groups,ou=example,dc=example,dc=net
# options:
# filter: (&(objectClass=some-group-class)(!(groupType=email)))
microsoftGraphOrg:
### Example for how to add your Microsoft Graph tenant
#providers:
# - target: https://graph.microsoft.com/v1.0
# authority: https://login.microsoftonline.com
# tenantId: ${MICROSOFT_GRAPH_TENANT_ID}
# clientId: ${MICROSOFT_GRAPH_CLIENT_ID}
# clientSecret: ${MICROSOFT_GRAPH_CLIENT_SECRET_TOKEN}
# userFilter: accountEnabled eq true and userType eq 'member'
# groupFilter: securityEnabled eq false and mailEnabled eq true and groupTypes/any(c:c+eq+'Unified')
locations:
- type: url
target: https://gitea-dev.allarddcs.nl/allard/kubernetes/raw/branch/master/group.yaml
rules:
- allow: [Group]
- type: url
target: https://gitea-dev.allarddcs.nl/allard/kubernetes/raw/branch/master/system.yaml
rules:
- allow: [System]
- type: url
target: https://gitea-dev.allarddcs.nl/allard/kubernetes/raw/branch/master/catalog-info.yaml
rules:
- allow: [Component]
scaffolder:
# Use to customize default commit author info used when new components are created
# defaultAuthor:
# name: Scaffolder
# email: scaffolder@backstage.io
# Use to customize the default commit message when new components are created
# defaultCommitMessage: 'Initial commit'
github:
token:
$env: GITHUB_TOKEN
visibility: public # or 'internal' or 'private'
gitea:
api:
baseUrl: https://gitea-dev.allarddcs.nl
token: ${GITEA_TOKEN}
visibility: public # or 'internal' or 'private'
gitlab:
api:
baseUrl: https://gitlab.com
token: ${GITLAB_TOKEN}
visibility: public # or 'internal' or 'private'
azure:
baseUrl: https://dev.azure.com/{your-organization}
api:
token: ${AZURE_TOKEN}
bitbucket:
api:
host: https://bitbucket.org
username: ${BITBUCKET_USERNAME}
token: ${BITBUCKET_TOKEN}
visibility: public # or 'internal' or 'private'
auth:
environment: development
### Providing an auth.session.secret will enable session support in the auth-backend
# session:
# secret: custom session secret
providers:
google:
development:
clientId: arbitrary-value
clientSecret: arbitrary-value
github:
development:
clientId: arbitrary-value
clientSecret: arbitrary-value
enterpriseInstanceUrl: arbitrary-value
gitlab:
development:
clientId: arbitrary-value
clientSecret: arbitrary-value
audience: arbitrary-value
saml:
entryPoint: 'http://localhost:7001/'
issuer: 'passport-saml'
okta:
development:
clientId: arbitrary-value
clientSecret: arbitrary-value
audience: arbitrary-value
oauth2:
development:
clientId: arbitrary-value
clientSecret: arbitrary-value
authorizationUrl: arbitrary-value
tokenUrl: arbitrary-value
###
# provide a list of scopes as needed for your OAuth2 Server:
#
# scope: saml-login-selector openid profile email
oidc:
# Note that you must define a session secret (see above) since the oidc provider requires session support.
# Note that by default, this provider will use the 'none' prompt which assumes that you are already logged on in the IDP.
# You should set prompt to:
# - auto: will let the IDP decide if you need to log on or if you can skip login when you have an active SSO session
# - login: will force the IDP to always present a login form to the user
development:
metadataUrl: arbitrary-value
clientId: arbitrary-value
clientSecret: arbitrary-value
authorizationUrl: arbitrary-value
tokenUrl: arbitrary-value
tokenSignedResponseAlg: arbitrary-value
scope: arbitrary-value # default='openid profile email'
prompt: arbitrary-value # default=none (allowed values: auto, none, consent, login)
auth0:
development:
clientId: arbitrary-value
clientSecret: arbitrary-value
domain: arbitrary-value
microsoft:
development:
clientId: arbitrary-value
clientSecret: arbitrary-value
tenantId: arbitrary-value
onelogin:
development:
clientId: arbitrary-value
clientSecret: arbitrary-value
issuer: arbitrary-value
costInsights:
engineerCost: 200000
products:
computeEngine:
name: Compute Engine
icon: compute
cloudDataflow:
name: Cloud Dataflow
icon: data
cloudStorage:
name: Cloud Storage
icon: storage
bigQuery:
name: BigQuery
icon: search
events:
name: Events
icon: data
metrics:
DAU:
name: Daily Active Users
default: true
MSC:
name: Monthly Subscribers
homepage:
clocks:
- label: UTC
timezone: UTC
- label: NYC
timezone: 'America/New_York'
- label: STO
timezone: 'Europe/Stockholm'
- label: TYO
timezone: 'Asia/Tokyo'
pagerduty:
eventsBaseUrl: 'https://events.pagerduty.com/v2'
jenkins:
instances:
- name: default
baseUrl: https://jenkins.example.com
username: backstage-bot
apiKey: 123456789abcdef0123456789abcedf012

32
lp/catalog-info.yaml Normal file
View File

@@ -0,0 +1,32 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: LP-cluster
namespace: default
description: All deployments of the LP-cluster in GitHub
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://github.com/AllardKrings/kubernetes
title: AllardDCS Kubernetes Configuration
docs:
- url: ./README.md
spec:
type: service
lifecycle: production
owner: group:default/allarddcs
- ./coturn/catalog-info.yaml
- ./clair/catalog-info.yaml
- ./quay/catalog-info.yaml
- ./traefik/catalog-info.yaml
- ./roundcube/catalog-info.yaml
- ./matrix/catalog-info.yaml
- ./drupal/catalog-info.yaml
- ./kubernetes/catalog-info.yaml
- ./pgadmin/catalog-info.yaml
- ./backstage/catalog-info.yaml
- ./postfixadmin/catalog-info.yaml
- ./dockermail/catalog-info.yaml
- ./mongodb/catalog-info.yaml
- ./postgres13/catalog-info.yaml
- ./nfs/catalog-info.yaml

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-clair
title: Clair (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

View File

@@ -0,0 +1,408 @@
---
# Source: clair/charts/postgresql/templates/networkpolicy.yaml
kind: NetworkPolicy
apiVersion: "networking.k8s.io/v1"
metadata:
name: clair-postgresql
labels:
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-10.0.0
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: postgresql
app.kubernetes.io/instance: clair
ingress:
# Allow inbound connections
- ports:
- port: 5432
---
# Source: clair/charts/postgresql/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: clair-postgresql
labels:
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-10.0.0
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
type: Opaque
data:
postgresql-postgres-password: "YjBRQTI1QjdnRw=="
postgresql-password: "Y2xhaXI="
---
# Source: clair/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: clair-clair
labels:
app.kubernetes.io/name: clair
helm.sh/chart: clair-0.2.9
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
data:
config.yaml: |
clair:
database:
# Database driver
type: pgsql
options:
# PostgreSQL Connection string
# https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
# This should be done using secrets or Vault, but for now this will also work
source: "postgres://postgres:clair@clair-postgresql:5432/postgres?sslmode=disable"
# Number of elements kept in the cache
# Values unlikely to change (e.g. namespaces) are cached in order to prevent needless roundtrips to the database.
cachesize: 16384
# 32-bit URL-safe base64 key used to encrypt pagination tokens
# If one is not provided, it will be generated.
# Multiple clair instances in the same cluster need the same value.
paginationkey: ""
api:
# v3 grpc/RESTful API server address
addr: "0.0.0.0:6060"
# Health server address
# This is an unencrypted endpoint useful for load balancers to check the healthiness of the clair server.
healthaddr: "0.0.0.0:6061"
# Deadline before an API request will respond with a 503
timeout: 900s
# Optional PKI configuration
# If you want to easily generate client certificates and CAs, try the following projects:
# https://github.com/coreos/etcd-ca
# https://github.com/cloudflare/cfssl
servername:
cafile:
keyfile:
certfile:
worker:
namespace_detectors:
- os-release
- lsb-release
- apt-sources
- alpine-release
- redhat-release
feature_listers:
- apk
- dpkg
- rpm
updater:
# Frequency the database will be updated with vulnerabilities from the default data sources
# The value 0 disables the updater entirely.
interval: "2h"
enabledupdaters:
- debian
- ubuntu
- rhel
- alpine
notifier:
# Number of attempts before the notification is marked as failed to be sent
attempts: 3
# Duration before a failed notification is retried
renotifyinterval: 2h
http:
# Optional endpoint that will receive notifications via POST requests
endpoint: ""
# Optional PKI configuration
# If you want to easily generate client certificates and CAs, try the following projects:
# https://github.com/cloudflare/cfssl
# https://github.com/coreos/etcd-ca
servername:
cafile:
keyfile:
certfile:
# Optional HTTP Proxy: must be a valid URL (including the scheme).
proxy:
---
# Source: clair/charts/postgresql/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: clair-postgresql-headless
labels:
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-10.0.0
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
# Use this annotation in addition to the actual publishNotReadyAddresses
# field below because the annotation will stop being respected soon but the
# field is broken in some versions of Kubernetes:
# https://github.com/kubernetes/kubernetes/issues/58662
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
type: ClusterIP
clusterIP: None
# We want all pods in the StatefulSet to have their addresses published for
# the sake of the other Postgresql pods even before they're ready, since they
# have to be able to talk to each other in order to become ready.
publishNotReadyAddresses: true
ports:
- name: tcp-postgresql
port: 5432
targetPort: tcp-postgresql
selector:
app.kubernetes.io/name: postgresql
app.kubernetes.io/instance: clair
---
# Source: clair/charts/postgresql/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: clair-postgresql
labels:
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-10.0.0
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
annotations:
spec:
type: ClusterIP
ports:
- name: tcp-postgresql
port: 5432
targetPort: tcp-postgresql
selector:
app.kubernetes.io/name: postgresql
app.kubernetes.io/instance: clair
role: primary
---
# Source: clair/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: clair-clair
labels:
app.kubernetes.io/name: clair
helm.sh/chart: clair-0.2.9
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
spec:
type: NodePort
ports:
- name: clair-api
port: 6060
nodePort: 30060
targetPort: 6060
protocol: TCP
name: "clair-api"
- name: clair-health
port: 6061
nodePort: 30061
targetPort: 6061
protocol: TCP
name: "clair-health"
selector:
app.kubernetes.io/name: clair
app.kubernetes.io/instance: clair
---
# Source: clair/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: clair-clair
labels:
app.kubernetes.io/name: clair
helm.sh/chart: clair-0.2.9
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: clair
app.kubernetes.io/instance: clair
template:
metadata:
labels:
app.kubernetes.io/name: clair
helm.sh/chart: clair-0.2.9
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
spec:
volumes:
- name: "clair-config"
configMap:
name: clair-clair
nodeSelector:
kubernetes.io/arch: amd64
containers:
- name: clair
image: "quay.io/coreos/clair:v4.3.6"
imagePullPolicy: IfNotPresent
args:
- "-log-level=debug"
ports:
- name: clair-api
containerPort: 6060
protocol: TCP
- name: clair-health
containerPort: 6061
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 6061
readinessProbe:
httpGet:
path: /health
port: 6061
volumeMounts:
- name: "clair-config"
mountPath: /etc/clair
resources:
limits:
cpu: 2
memory: 3000Mi
requests:
cpu: 50m
memory: 2000Mi
env:
- name: CLAIR_CONF
value: "/clair/config.yaml"
- name: CLAIR_MODE
value: "combo"
---
# Source: clair/charts/postgresql/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: clair-postgresql
labels:
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-10.0.0
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
annotations:
spec:
serviceName: clair-postgresql-headless
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: postgresql
app.kubernetes.io/instance: clair
role: primary
template:
metadata:
name: clair-postgresql
labels:
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-10.0.0
app.kubernetes.io/instance: clair
app.kubernetes.io/managed-by: Helm
role: primary
spec:
securityContext:
fsGroup: 1001
nodeSelector:
kubernetes.io/arch: amd64
containers:
- name: clair-postgresql
image: docker.io/bitnami/postgresql:11.10.0-debian-10-r2
imagePullPolicy: "IfNotPresent"
resources:
limits:
cpu: 2
memory: 512Mi
requests:
cpu: 50m
memory: 512Mi
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: POSTGRESQL_PORT_NUMBER
value: "5432"
- name: POSTGRESQL_VOLUME_DIR
value: "/bitnami/postgresql"
- name: PGDATA
value: "/bitnami/postgresql/data"
- name: POSTGRES_USER
value: "postgres"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: clair-postgresql
key: postgresql-password
- name: POSTGRESQL_ENABLE_LDAP
value: "no"
- name: POSTGRESQL_ENABLE_TLS
value: "no"
- name: POSTGRESQL_LOG_HOSTNAME
value: "false"
- name: POSTGRESQL_LOG_CONNECTIONS
value: "false"
- name: POSTGRESQL_LOG_DISCONNECTIONS
value: "false"
- name: POSTGRESQL_PGAUDIT_LOG_CATALOG
value: "off"
- name: POSTGRESQL_CLIENT_MIN_MESSAGES
value: "error"
- name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
value: "pgaudit"
ports:
- name: tcp-postgresql
containerPort: 5432
livenessProbe:
exec:
command:
- /bin/sh
- -c
- exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/sh
- -c
- -e
- |
exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432
[ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
volumeMounts:
- name: dshm
mountPath: /dev/shm
- name: data
mountPath: /bitnami/postgresql
subPath:
volumes:
- name: dshm
emptyDir:
medium: Memory
sizeLimit: 1Gi
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "1Gi"

86
lp/clair/helm/values.yaml Normal file
View File

@@ -0,0 +1,86 @@
# Default values for clair.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
logLevel: debug
insecureTls: false
image:
repository: quay.io/coreos/clair
tag: v2.1.6
pullPolicy: IfNotPresent
service:
name: clair
type: ClusterIP
internalApiPort: 6060
externalApiPort: 6060
internalHealthPort: 6061
externalHealthPort: 6061
ingress:
enabled: false
# ingressClassName: ""
# Used to create Ingress record (should be used with service.type: ClusterIP).
hosts:
- clair-clair
annotations:
# kubernetes.io/ingress.global-static-ip-name: "test-ip"
# kubernetes.io/tls-acme: "true"
tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
limits:
cpu: 2
memory: 3000Mi
requests:
cpu: 50m
memory: 2000Mi
config:
#postgresURI: "postgres13.postgres://clair.clair@host:5432/clair?sslmode=disable"
# paginationKey: "XxoPtCUzrUv4JV5dS+yQ+MdW7yLEJnRMwigVY/bpgtQ="
updateInterval: 2h
# notificationWebhookEndpoint: https://example.com/notify/me
enabledUpdaters:
- debian
- ubuntu
- rhel
- oracle
- alpine
enabledNamespaceDetectors:
- os-release
- lsb-release
- apt-sources
- alpine-release
- redhat-release
enabledFeatureListers:
- apk
- dpkg
- rpm
# Configuration values for the postgresql dependency.
# ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
nodeSelector:
kubernetes.io/arch: amd64
postgresql:
# The dependant Postgres chart can be disabled, to connect to
# an existing database by defining config.postgresURI
enabled: true
image:
debug: true
resources:
requests:
cpu: 50m
memory: 512Mi
limits:
cpu: 2
memory: 512Mi
postgresqlUsername: clair
postgresqlPassword: clair
persistence:
size: 1Gi
networkPolicy:
enabled: true
nodeSelector:
kubernetes.io/arch: amd64

13
lp/clair/yaml/README.md Normal file
View File

@@ -0,0 +1,13 @@
#Postgres13:
installing extensions:
- log in to the container:
kubectl exec -it postgres13-0 -n postgres -- bash
- log in to the clair database:
psql -U clair --dbname=clair
- run the SQL:
create extension if not exists "uuid-ossp";
\q
exit
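The same can also be done non-interactively in one command (a sketch, using the pod, user, and database names from the steps above):
kubectl exec -it postgres13-0 -n postgres -- psql -U clair --dbname=clair -c 'create extension if not exists "uuid-ossp";'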

111
lp/clair/yaml/clair.yaml Normal file
View File

@@ -0,0 +1,111 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: clair
namespace: quay
spec:
replicas: 1
selector:
matchLabels:
app: clair
template:
metadata:
labels:
app: clair
spec:
containers:
- image: quay.io/projectquay/clair:4.7.2
name: clairv4
resources:
limits:
memory: 500Mi
cpu: 1
requests:
memory: 200Mi
cpu: 500m
ports:
- containerPort: 6060
name: endpoint
protocol: TCP
- containerPort: 8089
name: health
protocol: TCP
env:
- name: CLAIR_CONF
value: /clair/config.yaml
- name: CLAIR_MODE
value: combo
volumeMounts:
- mountPath: /clair/
name: clair
# livenessProbe:
# httpGet:
# path: /health
# port: 6061
# readinessProbe:
# httpGet:
# path: /health
# port: 6061
nodeSelector:
kubernetes.io/arch: amd64
volumes:
- name: clair
persistentVolumeClaim:
claimName: clair-pvc
---
apiVersion: v1
kind: Service
metadata:
name: clair
namespace: quay
spec:
ports:
- name: endpoint
port: 6060
nodePort: 30081
protocol: TCP
targetPort: 6060
- name: health
port: 8089
nodePort: 30088
protocol: TCP
targetPort: 8089
selector:
app: clair
type: NodePort
status:
loadBalancer: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: clair-pvc
namespace: quay
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
storageClassName: ""
volumeMode: Filesystem
volumeName: clair-pv
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: clair-pv
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 1Gi
mountOptions:
- hard
- nfsvers=4.1
nfs:
path: /mnt/nfs_share/clair
server: 192.168.2.110
persistentVolumeReclaimPolicy: Retain
volumeMode: Filesystem

42
lp/clair/yaml/ingresroutes.yaml Executable file
View File

@@ -0,0 +1,42 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: clair-http
namespace: quay
spec:
entryPoints:
- web
routes:
- match: Host(`clair-lp.alldcs.nl`)
kind: Rule
services:
- name: clair
port: 8081
- match: Host(`clairh-lp.alldcs.nl`)
kind: Rule
services:
- name: health
port: 8088
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: clair-tls
namespace: quay
spec:
entryPoints:
- websecure
routes:
- match: Host(`clair-lp.alldcs.nl`)
kind: Rule
services:
- name: clair
port: 8081
- match: Host(`clairh-lp.alldcs.nl`)
kind: Rule
services:
- name: health
port: 8088
tls:
certResolver: letsencrypt

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-coturn
title: Coturn (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

33
lp/coturn/lp/README.md Normal file
View File

@@ -0,0 +1,33 @@
#configuration:
I have set hostNetwork: true, so ports 3478 (UDP/TCP) and 5349 (TCP) are bound directly on the node's network interface.
#BACKGROUND INFO
#ICE server (Interactive Connectivity Establishment server)
An ICE server is a network component used in WebRTC (Web Real-Time Communication)
and other peer-to-peer communication protocols to facilitate the establishment of a direct connection
between two devices (peers) over the internet.
ICE is a framework used to handle the complexities of establishing these connections,
especially when peers are behind firewalls or NATs (Network Address Translators).
The main role of an ICE server is to help peers find the best possible path for direct communication.
Here are some key components of ICE:
#STUN (Session Traversal Utilities for NAT):
A STUN server helps clients discover their public-facing IP address and port, which is needed when
they are behind a NAT or firewall. It assists in detecting if the peer is behind a NAT and helps with
establishing connectivity.
#TURN (Traversal Using Relays around NAT):
A TURN server is used when a direct connection cannot be established between peers due to network
restrictions like strict NATs or firewalls.
In this case, the TURN server acts as a relay to route traffic between the peers.
ICE servers (STUN and TURN) work together to ensure the peers can communicate by testing various
potential connection paths and selecting the best one.
In WebRTC, developers often configure ICE servers to make sure the communication is as efficient
as possible, even when the devices are on different networks with possible connectivity barriers.
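#quick check of the TURN-over-TLS listener:
A simple way to verify that the turns port is reachable and presents the expected certificate, assuming openssl is available on the client:
openssl s_client -connect coturn-lp.allarddcs.nl:5349 -servername coturn-lp.allarddcs.nl </dev/null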

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: coturn-cert
namespace: matrix
spec:
secretName: coturn-cert
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- "coturn-lp.allarddcs.nl"

105
lp/coturn/lp/coturn.yaml Normal file
View File

@@ -0,0 +1,105 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: coturn
namespace: matrix
spec:
replicas: 1
selector:
matchLabels:
app: coturn
template:
metadata:
labels:
app: coturn
spec:
# hostNetwork: true
containers:
- name: coturn
image: coturn/coturn:latest
env:
- name: DETECT_EXTERNAL_IP
value: "yes"
- name: DETECT_RELAY_IP
value: "yes"
ports:
- name: turn-udp
containerPort: 3478
protocol: UDP
- name: turn-tcp
containerPort: 3478
protocol: TCP
- name: turns-tcp
containerPort: 5349
protocol: TCP
volumeMounts:
- name: coturn-cert
mountPath: /etc/coturn/certs
readOnly: true
- name: coturn-data
mountPath: /etc/coturn/turnserver.conf
subPath: config/turnserver.conf
- name: coturn-data
mountPath: /var/log
subPath: logs
volumes:
- name: coturn-data
persistentVolumeClaim:
claimName: coturn-pvc
- name: coturn-cert
secret:
secretName: coturn-cert
---
apiVersion: v1
kind: Service
metadata:
name: coturn
namespace: matrix
spec:
type: LoadBalancer
ports:
- name: turn-udp
port: 3478
protocol: UDP
targetPort: 3478
- name: turn-tcp
port: 3478
protocol: TCP
targetPort: 3478
- name: turns-tcp
port: 5349
protocol: TCP
targetPort: 5349
selector:
app: coturn
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: coturn-pvc
namespace: matrix
spec:
storageClassName: ""
volumeName: coturn-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: coturn-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
volumeMode: Filesystem
hostPath:
path: /mnt/nfs_share/coturn/lp
type: Directory

View File

@@ -0,0 +1,18 @@
import hmac
import hashlib
import base64
import time
# Replace with your actual secret and realm
secret = b'heleenvanderpol'
realm = 'coturn-lp.allarddcs.nl'
# Step 1: Generate a timestamp-based username valid for ~24 hours
username = str(int(time.time()) + 3600 * 24)
# Step 2: Create password using HMAC-SHA1
key = hmac.new(secret, username.encode('utf-8'), hashlib.sha1)
password = base64.b64encode(key.digest()).decode('utf-8')
print("Username:", username)
print("Password:", password)

29
lp/dockermail/README.md Normal file
View File

@@ -0,0 +1,29 @@
kubectl apply -f pvc.yaml
helm repo add docker-mailserver https://docker-mailserver.github.io/docker-mailserver-helm
helm upgrade --install docker-mailserver docker-mailserver/docker-mailserver --namespace mail --create-namespace
kubectl exec -it -n mail deploy/docker-mailserver -- bash
setup email add admin@allarddcs.nl Dockermail01@
setup config dkim keysize 2048 domain 'allarddcs.nl'
2025-01-22 13:09:42+00:00 INFO rspamd-dkim: Here is the content of the TXT DNS record mail._domainkey.allarddcs.nl that you need to create:
v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwiLS07tI7kyAeIiR+hroK2r2v6/5/2CjLY2kzM2RA4nfnb/ZFG5/tYEF34NMBsZ4/fqki+ACUyN+65d1b7qa2Lxl+sj6honkVnmZHmayYhZbRp1odgim4IOdxRhqnJ3S0T3aVN7XLIgDng2/Uoyl/78qlPWMRZFbxe4h2z8iw3KTtTh3IrMsDnWWeatYO+Bx2WJDMc63qKuiZZ2XL4cC6ptrhKHRcAgErZFlUFyrZzfj7LhXx0Mq+6XLJGAGuVgo797qe4WM/y80PjoGQKCM9VduyFJd4du5DbGA9mhsB3Pu8o+MUt17xb/iWkpLMGO/0GBgLwLM5j7lfDwYOcFmDwIDAQAB
rspamd: stopped
rspamd: started
helm upgrade docker-mailserver docker-mailserver/docker-mailserver -n mail
supervisorctl restart postfix
in /etc/postfix/generic:
@allarddcs.nl @allarddcs.nl
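After publishing the DKIM record shown above in DNS, it can be verified with a quick lookup (assuming dig is available):
dig +short TXT mail._domainkey.allarddcs.nl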

View File

@@ -0,0 +1 @@
v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqRda3lUfPZNi94d4P/82nBtM3rH2KLi7xOOSYm0njK6jtuqfJEvA5Mp3SH8JP7AtOamVXHGqsK1AWxCjPcyRGsHaenp6pDzBToNWbWTbgL/yfI1YIfHagUfpR+1EIUyaIb1NsFOSuEOVecSXQqPnPFVAYAcobDF/5q5z1eM0uPNHyTH0HGVlH038O8tH/INkjmVUa1l6IttdYVeU942J2n4p7hFNqzSc9EcLmaS2Q8DEB1+5i02vhu62qKojYSYo+jr5ThhqOQEy07xOCCenGq7yps9nHbvgmt9m6RlfxIJ9SwphzPm9zIGuK4hxt8MrFD408L+eympWRrDFLfm4owIDAQAB

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-dockermail
title: Dockermail (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

View File

@@ -0,0 +1,64 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: docker-mailserver-ingress
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web, websecure
traefik.ingress.kubernetes.io/router.rule: "Host(`mail.allarddcs.nl`)"
spec:
rules:
- host: mail.allarddcs.nl
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: docker-mailserver
port:
number: 25
# Add other ports for IMAP, POP3, etc.
- host: mail.allarddcs.nl
tcp:
- port: 25
backend:
service:
name: docker-mailserver
port:
number: 25
- port: 465
backend:
service:
name: docker-mailserver
port:
number: 465
- port: 587
backend:
service:
name: docker-mailserver
port:
number: 587
- port: 993
backend:
service:
name: docker-mailserver
port:
number: 993
- port: 143
backend:
service:
name: docker-mailserver
port:
number: 143
- port: 110
backend:
service:
name: docker-mailserver
port:
number: 110
- port: 995
backend:
service:
name: docker-mailserver
port:
number: 995

View File

@@ -0,0 +1,23 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: ingressroute-tcp-dockermailserver
namespace: mail # Use the appropriate namespace where your Mailserver is deployed
spec:
entryPoints:
- smtp
- imap
- pop3
routes:
- match: HostSNI(`*`) # This will match all hostnames, change it to match a specific domain if needed
services:
- name: docker-mailserver # Replace with your actual mailserver service name
port: 25 # SMTP port for Mailserver (adjust as needed)
- match: HostSNI(`*`) # IMAP route
services:
- name: docker-mailserver
port: 143 # IMAP port for Mailserver (adjust as needed)
- match: HostSNI(`*`) # POP3 route
services:
- name: docker-mailserver
port: 110 # POP3 port for Mailserver (adjust as needed)

1
lp/dockermail/logs.sh Executable file
View File

@@ -0,0 +1 @@
microk8s kubectl logs docker-mailserver-0 -n mail

124
lp/dockermail/pvc.yaml Executable file
View File

@@ -0,0 +1,124 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mail-docker-mailserver-mail-config-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
volumeMode: Filesystem
hostPath:
path: /mnt/nfs_share/dockermail/config
type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mail-docker-mailserver-mail-config-pvc
namespace: mail
spec:
storageClassName: ""
volumeName: mail-docker-mailserver-mail-config-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mail-docker-mailserver-mail-data-pv
spec:
storageClassName: ""
capacity:
storage: 5Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
volumeMode: Filesystem
hostPath:
path: /mnt/nfs_share/dockermail/data
type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mail-docker-mailserver-mail-data-pvc
namespace: mail
spec:
storageClassName: ""
volumeName: mail-docker-mailserver-mail-data-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mail-docker-mailserver-mail-log-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
volumeMode: Filesystem
hostPath:
path: /mnt/nfs_share/dockermail/log
type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mail-docker-mailserver-mail-log-pvc
namespace: mail
spec:
storageClassName: ""
volumeName: mail-docker-mailserver-mail-log-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mail-docker-mailserver-mail-state-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
volumeMode: Filesystem
hostPath:
path: /mnt/nfs_share/dockermail/state
type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mail-docker-mailserver-mail-state-pvc
namespace: mail
spec:
storageClassName: ""
volumeName: mail-docker-mailserver-mail-state-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

1
lp/dockermail/setup.sh Normal file
View File

@@ -0,0 +1 @@
setup email add admin@allarddcs.nl Dockermail01@

View File

@@ -0,0 +1,17 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: mail-certificate
namespace: mail # Replace with your desired namespace (in this case, "mail")
spec:
secretName: mail-certificate-secret # The name of the Kubernetes secret to store the cert and key
issuerRef:
name: letsencrypt # Reference to the ClusterIssuer you created earlier
kind: ClusterIssuer
commonName: mail.allarddcs.nl # The common name for your mail domain
dnsNames:
- mail.allarddcs.nl # Additional DNS names (e.g., subdomains)
usages:
- digital signature
- key encipherment
- server auth

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-drupal
title: Drupal (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

146
lp/drupal/drupal.yaml Normal file
View File

@@ -0,0 +1,146 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: drupal
namespace: drupal
labels:
app: drupal
spec:
replicas: 1
selector:
matchLabels:
app: drupal
template:
metadata:
labels:
app: drupal
spec:
initContainers:
- name: init-sites-volume
image: drupal
command: ['/bin/bash', '-c']
args: ['chown www-data:www-data /var/www/html/sites -R']
volumeMounts:
- name: drupal-data
mountPath: /var/www/html/sites
subPath: sites
containers:
- name: drupal
image: drupal
imagePullPolicy: Always
env:
- name: ServerName
value: drupal-lp.alldcs.nl
# - name: GITEA__database__DB_TYPE
# value: mysql
# - name: GITEA__database__HOST
# value: mariadb.mariadb:3306
# - name: GITEA__database__NAME
# value: gitea
# - name: GITEA__database__USER
# value: gitea
# - name: GITEA__database__PASSWD
# value: gitea
ports:
- containerPort: 80
volumeMounts:
- name: drupal-data
mountPath: /var/www/html/modules
subPath: modules
- name: drupal-data
mountPath: /var/www/html/profiles
subPath: profiles
- name: drupal-data
mountPath: /var/www/html/themes
subPath: themes
- name: drupal-data
mountPath: /var/www/html/sites
subPath: sites
volumes:
- name: drupal-data
persistentVolumeClaim:
claimName: drupal-pvc
---
apiVersion: v1
kind: Service
metadata:
name: drupal
namespace: drupal
labels:
app: drupal
spec:
sessionAffinity: None
ports:
- protocol: TCP
port: 80
selector:
app: drupal
type: LoadBalancer
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: drupal-tls
namespace: drupal
spec:
entryPoints:
- websecure
routes:
- match: Host(`drupal-lp.alldcs.nl`)
kind: Rule
services:
- name: drupal
port: 80
tls:
certResolver: letsencrypt
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: drupal-http
namespace: drupal
spec:
entryPoints:
- web
routes:
- match: Host(`drupal-lp.alldcs.nl`)
kind: Rule
services:
- name: drupal
port: 80
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: drupal-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/drupal/riscv
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: drupal-pvc
namespace: drupal
spec:
storageClassName: ""
volumeName: drupal-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

17
lp/kubernetes/README.md Executable file
View File

@@ -0,0 +1,17 @@
1) microk8s enable dashboard
2) create the service account: kubectl apply -f ServiceAccount.yaml
3) create the clusterrolebinding: kubectl apply -f ClusterRoleBinding.yaml
4) create the ingressroute: kubectl apply -f Ingressroute-tls.yaml
5) generate a token:
kubectl -n kube-system create token admin-user --duration=8544h
Reinstallation:
after a reinstallation you must copy the config again, otherwise the certificate no longer matches:
sudo cp -i /var/snap/microk8s/current/credentials/client.config ${HOME}/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
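As a quick sanity check that the ClusterRoleBinding took effect (a sketch, assuming the admin-user service account from ServiceAccount.yaml):
kubectl auth can-i '*' '*' --as=system:serviceaccount:kube-system:admin-user
This should print "yes".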

38
lp/kubernetes/TIPS.md Executable file
View File

@@ -0,0 +1,38 @@
#If a PVC stays stuck in the "Terminating" status, the following command
#can help:
kubectl patch pvc {PVC_NAME} -p '{"metadata":{"finalizers":null}}'
#Switching context:
kubectl config set-context --current --namespace=tektontutorial
#If running kubectl gives "connection refused",
#the following commands can help:
sudo microk8s.refresh-certs --cert ca.crt
sudo microk8s.refresh-certs --cert server.crt
adjusting the cluster name:
nano /var/snap/microk8s/current/credentials/client.config
then regenerate the certificates:
sudo microk8s.refresh-certs --cert ca.crt
sudo microk8s.refresh-certs --cert server.crt
regenerate the kubectl configuration:
microk8s.kubectl config view --raw > $HOME/.kube/config
#metallb speaker permission errors
sudo nano /etc/apparmor.d/cri-containerd.apparmor.d
add the line: network,
sudo apparmor_parser -r /etc/apparmor.d/cri-containerd.apparmor.d
#full disk:
sudo microk8s ctr images list -q | xargs -r sudo microk8s ctr images rm
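#check disk usage before pruning (a sketch; the paths assume the default microk8s snap layout):
df -h /var/snap/microk8s/common
sudo du -sh /var/snap/microk8s/common/var/lib/containerd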

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-kubernetes
title: Kubernetes (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

16
lp/kubernetes/certificate.yaml Executable file
View File

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubernetes-lp.allarddcs.nl-tls
namespace: kube-system
spec:
dnsNames:
- kubernetes-lp.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: kubernetes-lp.allarddcs.nl-tls
usages:
- digital signature
- key encipherment

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system

1
lp/kubernetes/create-token.sh Executable file
View File

@@ -0,0 +1 @@
microk8s kubectl -n kube-system create token admin-user --duration=8544h

View File

@@ -0,0 +1,31 @@
apiVersion: traefik.io/v1alpha1
kind: ServersTransport
metadata:
name: kubernetes-dashboard-transport
namespace: kube-system
spec:
serverName: kubernetes-dashboard
insecureSkipVerify: true
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: kubernetes-dashboard-tls
namespace: kube-system
spec:
entryPoints: # [1]
- websecure
routes: # [2]
- kind: Rule
match: Host(`kubernetes-lp.allarddcs.nl`) # [3]
priority: 10 # [4]
services: # [8]
- kind: Service
name: kubernetes-dashboard
namespace: kube-system
port: 443 # [9]
serversTransport: kubernetes-dashboard-transport
tls: # [11]
secretName: kubernetes-lp.allarddcs.nl-tls
# certResolver: letsencrypt

View File

@@ -0,0 +1 @@
eyJhbGciOiJSUzI1NiIsImtpZCI6IlRxZzNuLW5jeThFYTJtN0xSN2VhWF96Y1dpcTRRZVZ2ZUMtOFpRNThfb2cifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTcyMTg5MTgxOSwiaWF0IjoxNjkxMTMzNDE5LCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6Imt1YmUtc3lzdGVtIiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImFkbWluLXVzZXIiLCJ1aWQiOiJkMjZiMDk3MS1kYmYwLTRhODYtOGUyMy0yYTU5MTEyY2M4OGMifX0sIm5iZiI6MTY5MTEzMzQxOSwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmFkbWluLXVzZXIifQ.GrwQolQgSrZ9yhXe54u6GcpksbgDk1C-sMJOKpDrKdr7FQGCNjq68cWuqWa-Fajtv39azFDzf5BiHsntcKeDMAFLJRGelKiM-yBqvD3cdXOU6fuhG-rUfnjVKAzJlVx95d5OJElgIqZ7bV1CnoLxuG5FTjhFKOzIJgMRvFZw8b4UJzbJF-SOASnOk4lEr_NYNJYdEIJB2VRWo-4nejohCEV4-ySHZikxBG88Di75Jkx2nUWlX_xCKvAdMe6LhZ9wA-DZGwtNEJZ_dwdUUCUcwzZ9M_i8TewujzrM6I4rPaSfEfvFJfj_pEOfZHQ0XhRYq2grxLppdwnYQzW5T2-suw

View File

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-matrix
title: Matrix (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

72
lp/matrix/lp/README.md Normal file
View File

@@ -0,0 +1,72 @@
#Installation:
You only need to expose port 443 (HTTPS) on your public IP.
All Matrix client communication and server-to-server federation is done over HTTPS.
So an IngressRoute from entrypoint 443 to port 8008 is sufficient.
You do not need to open anything else on the internet router.
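To verify the route works end-to-end, you can hit the Synapse client API through the ingress (a quick check):
curl https://matrix-lp.allarddcs.nl/_matrix/client/versions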
#configuratie STUN & TURN:
als je de container directory "data" mount op een pvc dan vind je daar de bestanden:
homeserver.db homeserver.db-wal matrix-lp.allarddcs.nl.log.config media_store
homeserver.db-shm homeserver.yaml matrix-lp.allarddcs.nl.signing.key
homeserver.yaml contains:
turn_uris:
- "turn:coturn-lp.allarddcs.nl:3478?transport=udp"
- "turn:coturn-lp.allarddcs.nl:3478?transport=tcp"
stun_uris:
- "stun:stun.l.google.com:19302"
- "stun:stun1.l.google.com:19302"
- "stun:stun2.l.google.com:19302"
#admin registration:
kubectl exec -it matrix-644984f6b7-d7jcp -n matrix -- register_new_matrix_user http://localhost:8008 -u admin -p Matrix01@ \
-a -k f0hE.OTU8UXQ44yIHPWtO+8CKhM-b:QZNngk_qhE8EvgmP-3h@
#regular user registration:
kubectl exec -it matrix-644984f6b7-d7jcp -n matrix -- register_new_matrix_user http://localhost:8008 -u diederick -p Matrix01@ \
--no-admin -k f0hE.OTU8UXQ44yIHPWtO+8CKhM-b:QZNngk_qhE8EvgmP-3h@
#general:
usage: register_new_matrix_user [-h] [-u USER] [--exists-ok] [-p PASSWORD | --password-file PASSWORD_FILE] [-t USER_TYPE] [-a | --no-admin] (-c CONFIG | -k SHARED_SECRET)
[server_url]
Used to register new users with a given homeserver when registration has been disabled. The homeserver must be configured with
the 'registration_shared_secret' option set.
positional arguments:
server_url URL to use to talk to the homeserver. By default, tries to find a suitable URL from the configuration
file. Otherwise, defaults to 'http://localhost:8008'.
options:
-h, --help show this help message and exit
-u USER, --user USER Local part of the new user. Will prompt if omitted.
--exists-ok Do not fail if user already exists.
-p PASSWORD, --password PASSWORD
New password for user. Will prompt for a password if this flag and `--password-file` are both omitted.
--password-file PASSWORD_FILE
File containing the new password for user. If set, will override `--password`.
-t USER_TYPE, --user_type USER_TYPE
User type as specified in synapse.api.constants.UserTypes
-a, --admin Register new user as an admin. Will prompt if --no-admin is not set either.
--no-admin Register new user as a regular user. Will prompt if --admin is not set either.
-c CONFIG, --config CONFIG
Path to server config file. Used to read in shared secret.
-k SHARED_SECRET, --shared-secret SHARED_SECRET
Shared secret as defined in server config file.
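#verify a registered account:
After registering, a login can be checked against the standard Matrix client API (a sketch using the admin account created above):
curl -X POST https://matrix-lp.allarddcs.nl/_matrix/client/v3/login \
  -d '{"type":"m.login.password","identifier":{"type":"m.id.user","user":"admin"},"password":"Matrix01@"}'
A successful login returns an access_token.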
#COTURN:
#check udp:
nc -zvu coturn-lp.allarddcs.nl 3478
nc -zv coturn-lp.allarddcs.nl 3478
nc -zv coturn-lp.allarddcs.nl 5349
#check the certificate:
kubectl describe secret coturn-cert -n matrix

98
lp/matrix/lp/element.yaml Executable file
View File

@@ -0,0 +1,98 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: element
namespace: matrix
labels:
app: element
spec:
replicas: 1
selector:
matchLabels:
app: element
template:
metadata:
labels:
app: element
spec:
containers:
- name: element
image: vectorim/element-web
securityContext:
runAsUser: 0 # Run as root (user ID 0)
runAsGroup: 0 # Run as root group (group ID 0)
ports:
- containerPort: 80
env:
- name: SYNAPSE_SERVER
value: "https://matrix-lp.allarddcs.nl"
- name: BASE_URL
value: "https://element-lp.allarddcs.nl"
volumeMounts:
- name: element-config
mountPath: /config
volumes:
- name: element-config
configMap:
name: element-config
---
apiVersion: v1
kind: ConfigMap
metadata:
name: element-config
data:
config.json: |
{
"default_homeserver": "https://matrix-lp.allarddcs.nl",
"branding": {
"brand": "AllardDCS",
"icon": "/path/to/logo.png"
}
}
---
apiVersion: v1
kind: Service
metadata:
name: element
namespace: matrix
spec:
ports:
- name: http
targetPort: 80
port: 80
selector:
app: element
type: ClusterIP
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: element-http
namespace: matrix
spec:
entryPoints:
- web
routes:
- match: Host(`element-lp.allarddcs.nl`)
kind: Rule
services:
- name: element
port: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: element-tls
namespace: matrix
spec:
entryPoints:
- websecure
routes:
- match: Host(`element-lp.allarddcs.nl`)
kind: Rule
services:
- name: element
port: 80
tls:
secretName: element-lp.allarddcs.nl-tls

View File

@@ -0,0 +1,14 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: acme-challenge
namespace: cert-manager
spec:
entryPoints:
- web
routes:
- match: PathPrefix(`/.well-known/acme-challenge/`)
kind: Rule
services:
- name: cert-manager
port: 80

View File

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: matrix-lp.allarddcs.nl-tls
namespace: matrix
spec:
dnsNames:
- matrix-lp.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: matrix-lp.allarddcs.nl-tls
usages:
- digital signature
- key encipherment

116
lp/matrix/lp/matrix.yaml Executable file
View File

@@ -0,0 +1,116 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: matrix
namespace: matrix
labels:
app: matrix
spec:
replicas: 1
selector:
matchLabels:
app: matrix
template:
metadata:
labels:
app: matrix
spec:
containers:
- name: matrix
image: matrixdotorg/synapse:latest
# args:
# - generate
env:
- name: SYNAPSE_SERVER_NAME
value: "matrix-lp.allarddcs.nl"
# - name: SYNAPSE_REPORT_STATS
# value: "yes"
volumeMounts:
- mountPath: /data
name: matrix
volumes:
- name: matrix
persistentVolumeClaim:
claimName: matrix-pvc
---
apiVersion: v1
kind: Service
metadata:
name: matrix
namespace: matrix
spec:
ports:
- name: http
targetPort: 8008
port: 8008
selector:
app: matrix
type: NodePort
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: matrix-http
namespace: matrix
spec:
entryPoints:
- web
routes:
- match: Host(`matrix-lp.allarddcs.nl`)
kind: Rule
services:
- name: matrix
port: 8008
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: matrix-tls
namespace: matrix
spec:
entryPoints:
- websecure
routes:
- match: Host(`matrix-lp.allarddcs.nl`)
kind: Rule
services:
- name: matrix
port: 8008
tls:
secretName: matrix-lp.allarddcs.nl-tls
# certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: matrix-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/matrix/lp
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: matrix-pvc
namespace: matrix
spec:
storageClassName: ""
volumeName: matrix-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,2 @@
account: test
recovery key: EsTz j5um bhAd WG9u GiHo bkt5 EjRa 7Bhh UxbH 89PT QEn2 zm2e

47
lp/matrix/prod/README.md Normal file
View File

@@ -0,0 +1,47 @@
#admin registration:
kubectl exec -it matrix-644984f6b7-d7jcp -n matrix -- register_new_matrix_user http://localhost:8008 -u admin -p Matrix01@ \
-a -k f0hE.OTU8UXQ44yIHPWtO+8CKhM-b:QZNngk_qhE8EvgmP-3h@
#regular user registration:
kubectl exec -it matrix-644984f6b7-d7jcp -n matrix -- register_new_matrix_user http://localhost:8008 -u diederick -p Matrix01@ \
--no-admin -k f0hE.OTU8UXQ44yIHPWtO+8CKhM-b:QZNngk_qhE8EvgmP-3h@
#general:
usage: register_new_matrix_user [-h] [-u USER] [--exists-ok] [-p PASSWORD | --password-file PASSWORD_FILE] [-t USER_TYPE] [-a | --no-admin] (-c CONFIG | -k SHARED_SECRET)
[server_url]
Used to register new users with a given homeserver when registration has been disabled. The homeserver must be configured with
the 'registration_shared_secret' option set.
positional arguments:
server_url URL to use to talk to the homeserver. By default, tries to find a suitable URL from the configuration
file. Otherwise, defaults to 'http://localhost:8008'.
options:
-h, --help show this help message and exit
-u USER, --user USER Local part of the new user. Will prompt if omitted.
--exists-ok Do not fail if user already exists.
-p PASSWORD, --password PASSWORD
New password for user. Will prompt for a password if this flag and `--password-file` are both omitted.
--password-file PASSWORD_FILE
File containing the new password for user. If set, will override `--password`.
-t USER_TYPE, --user_type USER_TYPE
User type as specified in synapse.api.constants.UserTypes
-a, --admin Register new user as an admin. Will prompt if --no-admin is not set either.
--no-admin Register new user as a regular user. Will prompt if --admin is not set either.
-c CONFIG, --config CONFIG
Path to server config file. Used to read in shared secret.
-k SHARED_SECRET, --shared-secret SHARED_SECRET
Shared secret as defined in server config file.
#coturn:
check UDP:
nc -zvu coturn-prod.allarddcs.nl 3478
check TCP:
nc -zv coturn-prod.allarddcs.nl 3478
nc -zv coturn-prod.allarddcs.nl 5349
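#TURN credentials (sketch):
coturn runs with use-auth-secret, so Synapse and clients authenticate with time-limited credentials derived from the shared secret.
A minimal sketch (assuming the static-auth-secret from coturn.yaml and openssl on the client) to generate a test username/password pair by hand:
user="$(($(date +%s) + 3600)):test"
pass=$(printf '%s' "$user" | openssl dgst -sha1 -hmac "heleenvanderpol" -binary | base64)
echo "username=$user password=$pass"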

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: coturn-cert
namespace: matrix
spec:
secretName: coturn-cert
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- "matrix-prod.allarddcs.nl"

View File

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: coturn-prod.allarddcs.nl-tls
namespace: matrix
spec:
dnsNames:
- coturn-prod.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: coturn-prod.allarddcs.nl-tls
usages:
- digital signature
- key encipherment

153
lp/matrix/prod/coturn.yaml Normal file
View File

@@ -0,0 +1,153 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: coturn
namespace: matrix
spec:
replicas: 1
selector:
matchLabels:
app: coturn
template:
metadata:
labels:
app: coturn
spec:
containers:
- name: coturn
image: coturn/coturn:latest
ports:
- name: turn-udp
containerPort: 3478
protocol: UDP
- name: turn-tcp
containerPort: 3478
protocol: TCP
- name: turns-tcp
containerPort: 5349
protocol: TCP
volumeMounts:
- name: coturn-cert
mountPath: "/etc/coturn/certs"
readOnly: true
- name: coturn-config
mountPath: /etc/coturn
- name: coturn-data
mountPath: /var/log
subPath: logs
args:
- "--tls-listening-port=5349"
- "--cert=/etc/coturn/certs/tls.crt"
- "--pkey=/etc/coturn/certs/tls.key"
volumes:
- name: coturn-config
configMap:
name: coturn-config
- name: coturn-data
persistentVolumeClaim:
claimName: coturn-pvc
- name: coturn-cert
secret:
secretName: coturn-cert
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coturn-config
namespace: matrix
data:
turnserver.conf: |
listening-port=3478
tls-listening-port=5349
listening-ip=0.0.0.0
relay-ip=0.0.0.0
total-quota=100
bps-capacity=0
cert=/etc/coturn/certs/fullchain.pem
pkey=/etc/coturn/certs/privkey.pem
log-file=/var/log/turnserver.log
no-stdout-log
verbose
min-port=49152
max-port=65535
# External IP (public or internal depending on setup)
listening-ip=0.0.0.0
relay-ip=0.0.0.0
external-ip=82.174.234.158
# Secure authentication
use-auth-secret
static-auth-secret=heleenvanderpol
realm=matrix-prod.allarddcs.nl
# Enable TLS
cert=/etc/coturn/certs/fullchain.pem
pkey=/etc/coturn/certs/privkey.pem
# WebRTC-specific settings
fingerprint
lt-cred-mech
stale-nonce
# Allow peers to communicate via relay
no-multicast-peers
no-loopback-peers
---
apiVersion: v1
kind: Service
metadata:
name: coturn
namespace: matrix
spec:
selector:
app: coturn
type: LoadBalancer
ports:
- name: coturn-udp
port: 3478
targetPort: 3478
protocol: UDP
- name: coturn-tcp
port: 3478
targetPort: 3478
protocol: TCP
- name: coturn-tls
port: 5349
targetPort: 5349
protocol: TCP
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: coturn-pvc
namespace: matrix
spec:
storageClassName: ""
volumeName: coturn-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: coturn-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/coturn
readOnly: false

View File

@@ -0,0 +1,14 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: acme-challenge
namespace: cert-manager
spec:
entryPoints:
- web
routes:
- match: PathPrefix(`/\.well-known/acme-challenge/`)
kind: Rule
services:
- name: cert-manager
port: 80

View File

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: matrix-prod.allarddcs.nl-tls
namespace: matrix
spec:
dnsNames:
- matrix-prod.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: matrix-prod.allarddcs.nl-tls
usages:
- digital signature
- key encipherment

116
lp/matrix/prod/matrix.yaml Executable file
View File

@@ -0,0 +1,116 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: matrix
namespace: matrix
labels:
app: matrix
spec:
replicas: 1
selector:
matchLabels:
app: matrix
template:
metadata:
labels:
app: matrix
spec:
containers:
- name: matrix
image: matrixdotorg/synapse:latest
# args:
# - generate
env:
- name: SYNAPSE_SERVER_NAME
value: "matrix.allarddcs.nl"
# - name: SYNAPSE_REPORT_STATS
# value: "yes"
volumeMounts:
- mountPath: /data
name: matrix
volumes:
- name: matrix
persistentVolumeClaim:
claimName: matrix-pvc
---
apiVersion: v1
kind: Service
metadata:
name: matrix
namespace: matrix
spec:
ports:
- name: http
targetPort: 8008
port: 8008
selector:
app: matrix
type: NodePort
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: matrix-http
namespace: matrix
spec:
entryPoints:
- web
routes:
- match: Host(`matrix-prod.allarddcs.nl`)
kind: Rule
services:
- name: matrix
port: 8008
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: matrix-tls
namespace: matrix
spec:
entryPoints:
- websecure
routes:
- match: Host(`matrix-prod.allarddcs.nl`)
kind: Rule
services:
- name: matrix
port: 8008
tls:
secretName: matrix-prod.allarddcs.nl-tls
# certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: matrix-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/matrix/prod
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: matrix-pvc
namespace: matrix
spec:
storageClassName: ""
volumeName: matrix-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

5
lp/mongodb/README.md Executable file
View File

@@ -0,0 +1,5 @@
# Kubernetes MongoDB manifest YAMLs
Kubernetes manifests for a MongoDB deployment.
Full documentation: see https://devopscube.com/deploy-mongodb-kubernetes/ for a step-by-step guide to using these manifests.
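Quick check (sketch): mongo-express is exposed as a ClusterIP service on port 8081, so without the IngressRoute it can be reached through a port-forward:
kubectl port-forward svc/mongo-express 8081:8081 -n mongodb
then browse to http://localhost:8081 (basic auth: admin / Mongodb01).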

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-mongodb
title: Mongodb (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

21
lp/mongodb/ingressroute.yaml Executable file
View File

@@ -0,0 +1,21 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: mongodb-express-tls
namespace: mongodb
spec:
entryPoints:
- websecure
routes:
- match: Host(`mongodb-lp.allarddcs.nl`)
kind: Rule
services:
- name: mongo-express
port: 8081
tls:
certResolver: letsencrypt

47
lp/mongodb/mongo-express.yaml Executable file
View File

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongodb-express
namespace: mongodb
labels:
app: mongo-express
spec:
replicas: 1
selector:
matchLabels:
app: mongo-express
template:
metadata:
labels:
app: mongo-express
spec:
containers:
- name: mongo-express
image: mongo-express
ports:
- containerPort: 8081
env:
- name: ME_CONFIG_OPTIONS_EDITORTHEME
value: "ambiance"
- name: ME_CONFIG_MONGODB_SERVER
value: mongodb
- name: ME_CONFIG_BASICAUTH_USERNAME
value: "admin"
- name: ME_CONFIG_BASICAUTH_PASSWORD
value: "Mongodb01"
---
apiVersion: v1
kind: Service
metadata:
name: mongo-express
namespace: mongodb
labels:
name: mongo-express
spec:
type: ClusterIP
ports:
- port: 8081
name: http
selector:
app: mongo-express

86
lp/mongodb/mongodb.yaml Executable file
View File

@@ -0,0 +1,86 @@
apiVersion: apps/v1
kind: ReplicaSet
metadata:
labels:
app: mongodb
name: mongodb
namespace: mongodb
spec:
replicas: 1
selector:
matchLabels:
app: mongodb
template:
metadata:
labels:
app: mongodb
spec:
containers:
- image: mongodb/mongodb-community-server
name: mongodb
# args: ["--dbpath","/data/db"]
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: admin
- name: MONGO_INITDB_ROOT_PASSWORD
value: Mongodb01
volumeMounts:
- name: "mongo-data-dir"
mountPath: "/data/db"
nodeSelector:
kubernetes.io/arch: amd64
volumes:
- name: "mongo-data-dir"
persistentVolumeClaim:
claimName: "mongo-data"
---
apiVersion: v1
kind: Service
metadata:
labels:
app: mongodb
name: mongodb
namespace: mongodb
spec:
ports:
- port: 27017
protocol: TCP
targetPort: 27017
selector:
app: mongodb
type: ClusterIP
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mongo-data-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/mongodb
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mongo-data
namespace: mongodb
spec:
storageClassName: ""
volumeName: mongo-data-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

35
lp/nfs/README.md Executable file
View File

@@ -0,0 +1,35 @@
NFS Installation:
=================
helm repo add nfs-subdir-external-provisioner \
https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
helm install -n nfs-provisioning --create-namespace nfs-subdir-external-provisioner \
nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
--set nfs.server=192.168.2.110 --set nfs.path=/opt/dynamic-storage
Testing:
kubectl apply -f demo-pvc.yaml
kubectl apply -f test-pod.yaml
kubectl exec -it test-pod -n nfs-provisioning -- df -h
#set nfs as the default storage class:
kubectl get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
microk8s-hostpath microk8s.io/hostpath Delete WaitForFirstConsumer false 33m
nfs-client cluster.local/nfs-subdir-external-provisioner Delete Immediate true 18m
kubectl patch storageclass microk8s-hostpath -p \
'{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
kubectl patch storageclass nfs-client -p \
'{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

11
lp/nfs/catalog-info.yaml Normal file
View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-nfs
title: Nfs (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

12
lp/nfs/demo-pvc.yaml Executable file
View File

@@ -0,0 +1,12 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: demo-claim
namespace: nfs-provisioning
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Mi

18
lp/nfs/k3s/nfs.yaml Executable file
View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Namespace
metadata:
name: nfs-provisioning
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: nfs-provisioning
namespace: kube-system
spec:
repo: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
chart: nfs-subdir-external-provisioner
targetNamespace: nfs-provisioning
valuesContent: |-
nfs:
server: 192.168.2.100
path: /opt/dynamic-storage

View File

@@ -0,0 +1,13 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
server: 192.168.2.110
share: /mnt/nfs_share/nfs-dynamic/
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
- hard
- nfsvers=4.1

29
lp/nfs/test-nfs.yaml Executable file
View File

@@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fedoratest
labels:
app: fedoratest
spec:
replicas: 1
selector:
matchLabels:
app: fedora
template:
metadata:
labels:
app: fedora
spec:
containers:
- name: fedora
image: fedora
command: ["/bin/bash", "-c", "--"]
args: ["while true; do sleep 30; done;"]
volumeMounts:
- name: nfs-vol
mountPath: /opt/nfs
volumes:
- name: nfs-vol
nfs:
server: 192.168.40.100
path: /mnt/nfs_share

22
lp/nfs/test-pod.yaml Executable file
View File

@@ -0,0 +1,22 @@
kind: Pod
apiVersion: v1
metadata:
name: test-pod
namespace: nfs-provisioning
spec:
containers:
- name: test-pod
image: busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/SUCCESS && sleep 600"
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: demo-claim

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-pgadmin
title: Pgadmin (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

16
lp/pgadmin/certificate.yaml Executable file
View File

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: pgadmin-lp.allarddcs.nl-tls
namespace: postgres
spec:
dnsNames:
- pgadmin-lp.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: pgadmin-lp.allarddcs.nl-tls
usages:
- digital signature
- key encipherment

134
lp/pgadmin/pgadmin.yaml Executable file
View File

@@ -0,0 +1,134 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pgadmin-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
volumeMode: Filesystem
hostPath:
path: /mnt/nfs_share/pgadmin/lp
type: Directory
---
#apiVersion: v1
#kind: PersistentVolume
#metadata:
# name: pgadmin-pv
#spec:
# storageClassName: ""
# capacity:
# storage: 1Gi
# accessModes:
# - ReadWriteMany
# persistentVolumeReclaimPolicy: Retain
# mountOptions:
# - hard
# - nfsvers=4.1
# nfs:
# server: 192.168.2.110
# path: /mnt/nfs_share/pgadmin/lp
# readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-pvc
namespace: postgres
spec:
storageClassName: ""
volumeName: pgadmin-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: pgadmin
namespace: postgres
labels:
app: pgadmin
spec:
replicas: 1
selector:
matchLabels:
app: pgadmin
template:
metadata:
labels:
app: pgadmin
spec:
containers:
- name: pgadmin
image: dpage/pgadmin4
ports:
- containerPort: 80
env:
- name: PGADMIN_DEFAULT_EMAIL
value: admin@alldcs.nl
- name: PGADMIN_DEFAULT_PASSWORD
value: Pgadmin01@
volumeMounts:
- mountPath: /var/lib/pgadmin
name: pgadmin
volumes:
- name: pgadmin
persistentVolumeClaim:
claimName: pgadmin-pvc
---
apiVersion: v1
kind: Service
metadata:
name: pgadmin
namespace: postgres
labels:
name: pgadmin
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
selector:
app: pgadmin
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: pgadmin-tls
namespace: postgres
spec:
entryPoints:
- websecure
routes:
- match: Host(`pgadmin-lp.allarddcs.nl`)
kind: Rule
services:
- name: pgadmin
port: 80
tls:
secretName: pgadmin-lp.allarddcs.nl-tls
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: pgadmin-http
namespace: postgres
spec:
entryPoints:
- web
routes:
- match: Host(`pgadmin-lp.allarddcs.nl`)
kind: Rule
services:
- name: pgadmin
port: 80

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-postfixadmin
title: Postfixadmin (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

104
lp/postfixadmin/postfixadmin.yaml Executable file
View File

@@ -0,0 +1,104 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: postfixadmin-pv
labels:
type: local
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/postfixadmin
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postfixadmin-pvc
namespace: mail
spec:
storageClassName: ""
volumeName: postfixadmin-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1G
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: postfixadmin
namespace: mail
labels:
app: postfixadmin
spec:
replicas: 1
selector:
matchLabels:
app: postfixadmin
template:
metadata:
labels:
app: postfixadmin
spec:
containers:
- name: postfixadmin
image: amd64/postfixadmin
ports:
- containerPort: 80
env:
- name: POSTFIXADMIN_DB_TYPE
value: pgsql
- name: POSTFIXADMIN_DB_HOST
value: postgres13.postgres
- name: POSTFIXADMIN_DB_USER
value: postfixadmin
- name: POSTFIXADMIN_DB_PASSWORD
value: postfixadmin
- name: POSTFIXADMIN_DB_NAME
value: postfixadmin
volumeMounts:
- name: postfixconfig
mountPath: /var/www/html/
volumes:
- name: postfixconfig
persistentVolumeClaim:
claimName: postfixadmin-pvc
---
apiVersion: v1
kind: Service
metadata:
name: postfixadmin
namespace: mail
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
selector:
app: postfixadmin
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: ingressroute-postfixadmin-tls
namespace: mail
spec:
entryPoints:
- websecure
routes:
- match: Host(`postfixadmin-lp.allarddcs.nl`)
kind: Rule
services:
- name: postfixadmin
port: 80
tls:
certResolver: letsencrypt

18
lp/postgres13/README.md Normal file
View File

@@ -0,0 +1,18 @@
#corrupt WAL archive
#start the pod without starting postgres itself by adding the following to the yaml:
command: ["sh"]
args: ["-c", "while true; do echo $(date -u) >> /tmp/run.log; sleep 5; done"]
#then log in to the running container
kubectl exec -it postgres13-0 -n postgres -- sh
#switch to the postgres user
su postgres
#reset the WAL archive:
pg_resetwal -f /var/lib/postgresql/data
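#verify (sketch): after the reset, check the control data, then remove the command/args override again so postgres starts normally
pg_controldata /var/lib/postgresql/data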

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-postgres13
title: Postgres13 (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

90
lp/postgres13/postgres13lp.yaml Executable file
View File

@@ -0,0 +1,90 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres13
namespace: postgres
spec:
serviceName: postgres13
replicas: 1
selector:
matchLabels:
app: postgres13
template:
metadata:
labels:
app: postgres13
spec:
containers:
- name: postgres13
image: postgres:13
# command: ["sh"]
# args: ["-c", "while true; do echo $(date -u) >> /tmp/run.log; sleep 5; done"]
resources:
limits:
memory: 500Mi
cpu: 1
requests:
memory: 200Mi
cpu: 500m
ports:
- containerPort: 5432
env:
- name: POSTGRES_DB
value: quay
- name: POSTGRES_USER
value: quay
- name: POSTGRES_PASSWORD
value: quay
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres
volumes:
- name: postgres
persistentVolumeClaim:
claimName: postgres13-pvc
---
apiVersion: v1
kind: Service
metadata:
name: postgres13
labels:
name: postgres13
namespace: postgres
spec:
type: ClusterIP
ports:
- port: 5432
name: postgres
selector:
app: postgres13
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgres13-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
volumeMode: Filesystem
hostPath:
path: /mnt/nfs_share/postgres13lp
type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres13-pvc
namespace: postgres
spec:
storageClassName: ""
volumeName: postgres13-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 2Gi

88
lp/quay/README.md Executable file
View File

@@ -0,0 +1,88 @@
#Installation
#create its own namespace:
kubectl create ns quay
#Redis:
already installed on the odroid, 192.168.2.239:6379, password: Redis01@
#Postgres13:
already installed on the odroid, 192.168.2.235:5432, password: quay
install the required extension:
- log in to the container:
kubectl exec -it postgres13-0 -n postgres -- bash
- connect to the quay database:
psql -U quay --dbname=quay
- run the SQL:
create extension if not exists pg_trgm;
#quay
kubectl apply -f quay.yaml -n quay
#configure SSL:
Mounting a certificate directly is unfortunately not possible, because it would land in the same directory in the container
where config.yaml is already mounted from the nfs share.
therefore:
- first create a Let's Encrypt certificate on pisvrwsv01 via cert-manager:
kubectl apply -f certificate.yaml
This produces the secret "quay.alldcs.nl-tls".
then extract the ssl.cert and ssl.key:
kubectl get secret quay.alldcs.nl-tls -o=jsonpath="{.data.tls\.crt}" | base64 -d > ssl.cert
kubectl get secret quay.alldcs.nl-tls -o=jsonpath="{.data.tls\.key}" | base64 -d > ssl.key
Then copy these two files to /mnt/nfs-share/quay/conf on the NFS server.
DO NOT FORGET TO MAKE THEM READABLE: chmod 777....
#further configuration:
stop quay:
kubectl delete -f quay.yaml
start quay in config mode:
kubectl apply -f quay-config.yaml
for some reason the ingressroute no longer works in config mode, so use the nodeport instead:
go to:
localhost:<nodeport>
NOTE: this really has to be LOCALHOST.
log in with the quay config user and the password from the yaml file: config01
configure quay
download the config.yaml (it ends up in /home/ubuntu/Downloads)
untar the config.yaml:
tar -zxf
copy the config.yaml to the /mnt/nfs_share/quay/conf directory
start the regular quay again:
kubectl apply -f quay.yaml
#authorizations
you can set FEATURE_USER_CREATION: true in the config.yaml and then create a user (see the example keys at the end of this file).
then use pgadmin to set "verified" to "true" for that account (mail does not work yet).
You can then add this user to SUPER_USERS in the config.yaml.
Restart quay and you are an administrator!
#integration with Clair:
start Quay in config mode:
kubectl run --rm -it --name quay_config -p 8080:8080 \
-v /home/ubuntu:/conf/stack \
quay.io/projectquay/quay:v3.10.0 config Quay01@@
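#example (sketch, hypothetical usernames) of the relevant keys in /mnt/nfs_share/quay/conf/config.yaml:
FEATURE_USER_CREATION: true
SUPER_USERS:
  - admin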

11
lp/quay/catalog-info.yaml Normal file
View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-quay
title: Quay (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

View File

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: quay.alldcs.nl-tls
namespace: quay
spec:
dnsNames:
- quay-lp.alldcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: quay-lp.alldcs.nl-tls
usages:
- digital signature
- key encipherment

View File

@@ -0,0 +1,2 @@
cd ~/
microk8s kubectl create secret generic quay-credentials --from-file=.dockerconfigjson=.docker/config.json --type=kubernetes.io/dockerconfigjson
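# sketch (assumption): let pods in this namespace pull from quay.alldcs.nl without listing imagePullSecrets per pod
microk8s kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "quay-credentials"}]}'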

View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: quay-credentials
data:
.dockerconfigjson: ewogICJhdXRocyI6IHsKICAgICJxdWF5LmFsbGRjcy5ubCI6IHsKICAgICAgImF1dGgiOiAiWVd4c1lYSmtLM0p2WW05ME9raE1VVXBFVlZNMlNGSTNSMHRPVkVvMlJVNDFWVW8xT0VkS1FURXlOVkpMU0VOSU1FbzNVRkJPVGtGRlFVczJRMFpCVkRVNFNVdEVOalZhUTBsQ05qWT0iLAogICAgICAiZW1haWwiOiAiIgogICAgfQogIH0KfQ==
type: kubernetes.io/dockerconfigjson

14
lp/quay/ingressroute-http.yaml Executable file
View File

@@ -0,0 +1,14 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: quay-http
namespace: quay
spec:
entryPoints:
- web
routes:
- match: Host(`quay-lp.allarddcs.nl`)
kind: Rule
services:
- name: quay
port: 8080

16
lp/quay/ingressroute-tls.yaml Executable file
View File

@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: quay-tls
namespace: quay
spec:
entryPoints:
- websecure
routes:
- match: Host(`quay-lp.allarddcs.nl`)
kind: Rule
services:
- name: quay
port: 443
tls:
certResolver: letsencrypt

16
lp/quay/ingressrouteTCP.yaml Executable file
View File

@@ -0,0 +1,16 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: quay-tcp
namespace: quay
spec:
entryPoints:
- websecure
routes:
- match: HostSNI(`quay-lp.alldcs.nl`)
services:
- name: quay
port: 443
tls:
passthrough: true

94
lp/quay/quay-config.yaml Executable file
View File

@@ -0,0 +1,94 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: quay
namespace: quay
labels:
app: quay
spec:
replicas: 1
selector:
matchLabels:
app: quay
template:
metadata:
labels:
app: quay
spec:
containers:
- name: quay
image: quay.io/projectquay/quay:latest
args:
- config
env:
- name: CONFIG_APP_PASSWORD
value: config01
ports:
- containerPort: 8080
name: web
- containerPort: 8443
name: websecure
volumeMounts:
- mountPath: /quay-registry/conf/stack/
name: quay
subPath: conf
- mountPath: /quay-registry/datastorage/
name: quay
subPath: data
nodeSelector:
kubernetes.io/arch: amd64
volumes:
- name: quay
persistentVolumeClaim:
claimName: quay-pvc
---
apiVersion: v1
kind: Service
metadata:
name: quay
namespace: quay
spec:
ports:
- name: http
targetPort: 8080
port: 8080
- name: https
targetPort: 8443
port: 443
selector:
app: quay
type: NodePort
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: quay-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/quay
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: quay-pvc
namespace: quay
spec:
storageClassName: ""
volumeName: quay-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

89
lp/quay/quay.yaml Executable file
View File

@@ -0,0 +1,89 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: quay
namespace: quay
labels:
app: quay
spec:
replicas: 1
selector:
matchLabels:
app: quay
template:
metadata:
labels:
app: quay
spec:
containers:
- name: quay
image: quay.io/projectquay/quay:latest
ports:
- containerPort: 8080
name: web
- containerPort: 8443
name: websecure
volumeMounts:
- mountPath: /quay-registry/conf/stack/
name: quay
subPath: conf
- mountPath: /quay-registry/datastorage/
name: quay
subPath: data
nodeSelector:
kubernetes.io/arch: amd64
volumes:
- name: quay
persistentVolumeClaim:
claimName: quay-pvc
---
apiVersion: v1
kind: Service
metadata:
name: quay
namespace: quay
spec:
ports:
- name: http
targetPort: 8080
port: 8080
- name: https
targetPort: 8443
port: 443
selector:
app: quay
type: NodePort
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: quay-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/quay
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: quay-pvc
namespace: quay
spec:
storageClassName: ""
volumeName: quay-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

4
lp/roundcube/README.md Normal file
View File

@@ -0,0 +1,4 @@
helm repo add mlohr https://helm-charts.mlohr.com/
helm repo update
helm install roundcube mlohr/roundcube -f values.yaml -n mail

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-roundcube
title: Roundcube (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

16
lp/roundcube/certificate.yaml Executable file
View File

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: roundcube-lp.allarddcs.nl-tls
namespace: mail
spec:
dnsNames:
- roundcube-lp.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: roundcube-lp.allarddcs.nl-tls
usages:
- digital signature
- key encipherment

View File

@@ -0,0 +1,17 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: roundcube-tls
namespace: mail
spec:
entryPoints:
- websecure
routes:
- kind: Rule
match: Host(`roundcube-lp.allarddcs.nl`)
services:
- name: roundcube
port: 80
tls:
secretName: roundcube-lp.allarddcs.nl-tls
# certResolver: letsencrypt

2
lp/roundcube/install.sh Executable file
View File

@@ -0,0 +1,2 @@
microk8s helm install roundcube mlohr/roundcube -f values.yaml -n mail
microk8s kubectl get pod -n mail | grep roundcube

1
lp/roundcube/uninstall.sh Executable file
View File

@@ -0,0 +1 @@
microk8s helm uninstall roundcube -n mail

1
lp/roundcube/upgrade.sh Executable file
View File

@@ -0,0 +1 @@
microk8s helm upgrade roundcube mlohr/roundcube -f values.yaml -n mail

408
lp/roundcube/values.yaml Normal file
View File

@@ -0,0 +1,408 @@
## Roundcube Docker image
image: roundcube/roundcubemail:{{ .Chart.AppVersion }}-fpm-alpine
## ImagePullPolicy for Roundcube Docker image
imagePullPolicy: IfNotPresent
## RBAC Role Based Access Control for resources
rbac:
# Specifies whether RBAC resources should be created
create: false
rules: []
## Enable RBAC rules
# - apiGroups:
# - myApiGroups
# resourceNames:
# - myResourceNames
# resources:
# - myResources
# verbs:
# - use
## Service Account for Deployment
serviceAccount:
# Specifies whether ServiceAccount resource should be created
create: false
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using a template
name: ""
additionalLabels: {}
additionalAnnotations: {}
## Roundcube Core Configuration
config:
## DES Key for session encryption.
## Will be generated randomly if not set.
desKey:
## User interface default language.
defaultLanguage: en_US
## Allow for custom skin installation
skins:
classic:
enabled: false
composerPackage:
name: roundcube/classic
version: 1.6.0
larry:
enabled: false
composerPackage:
name: roundcube/larry
version: 1.6.1
## custom skin example
#custom:
# enabled: true
# composerPackage:
# name: custom (defaults to roundcube/{{ skin name }})
# version: 1.0 (defaults to roundcube version)
## User interface default skin.
defaultSkin: elastic
## Plugin configuration
plugins:
## https://github.com/roundcube/roundcubemail/blob/master/plugins/managesieve/config.inc.php.dist
managesieve:
enabled: false
config:
host: "%h:4190"
useTls: true
## Context menu plugin: Adds context menus with common tasks to various parts of Roundcube
## https://packagist.org/packages/johndoh/contextmenu
## Example for a dynamically installed plugin (via composer)
contextmenu:
## Enabled installing and loading the plugin.
enabled: false
## composer package information
composerPackage:
name: "johndoh/contextmenu"
version: "3.3"
## Maximum allowed upload file size (e.g., for attachments).
## Do not forget to adjust ingress accordingly, e.g., by adding the following annotation:
## nginx.ingress.kubernetes.io/proxy-body-size: 25m
uploadMaxFilesize: 25M
## Custom Roundcube configuration parameters
customRoundcubeConfig: |
/*
* Custom Roundcube Configuration values
* Check https://github.com/roundcube/roundcubemail/wiki/Configuration for available options.
*/
// $config['skin_logo'] = "";
// $config['support_url'] = "https://roundcube.net/";
## Custom configuration for PHP interpreter
customPhpConfig: |
## Configure custom memory limit for PHP
# memory_limit=128M
## Custom configuration for PHP-FPM
## Comment lines with a semicolon (;) in this file,
## don't use pound (#) to comment lines
customPhpFpmConfig: |
;pm.max_children=200
;pm.status_path=/status
## Configure database creation/usage
database:
## Which database source should be used
## Supported values:
## * external: Do not create a database, but use an existing external database. See `externalDatabase` for further configuration.
## * zalando-postgres: Request a database to be created by Zalando Postgres operator (https://github.com/zalando/postgres-operator, operator needs to be preinstalled)
source: external
## Number of database cluster instances to be launched
## (only applicable for source=zalando-postgres)
instances:
## Size of the volume to be created for the database
volumeSize: 5Gi
## Name of the StorageClass to be used for the volume
storageClassName:
## Configuration for an external database (not deployed within this chart)
## When working with a NetworkPolicy, add an egress rule to networkPolicy.egress.additionalRules to allow access to the database.
externalDatabase:
## Type of the external database. supported: mysql, pgsql, sqlite
type: pgsql
## Hostname of the external database server
host: postgres13.postgres
## Port number of the external database server
port: 5432
## Database name
name: roundkube
## Username for the external database server
user: roundkube
## Password for the external database server
password: roundkube
## IMAP settings
imap:
## Hostname or IP address of your IMAP server
host: mail.allarddcs.nl
## IMAP port. Default ports depending on encryption setting:
## 143 - none, STARTTLS
## 993 - SSL/TLS (recommended)
port: 143
## Transport encryption. Please change port accordingly. Allowed values:
## none - no encryption
## starttls - use STARTTLS
## ssltls - use SSL/TLS (recommended)
encryption: starttls
## SMTP settings
smtp:
## Hostname or IP address of your SMTP server
host: mail.allarddcs.nl
## SMTP port. Default ports depending on encryption setting:
## 25 - none
## 465 - SSL/TLS
## 587 - STARTTLS (recommended)
port: 587
## Transport encryption. Please change port accordingly. Allowed values:
## none - no encryption
## starttls - use STARTTLS
## ssltls - use SSL/TLS (recommended)
encryption: starttls
## SMTP username (use "%u" to use the IMAP username)
username: "%u"
## SMTP password (use "%p" to use the IMAP password)
password: "%p"
## Additional configuration for the ConfigMap resource
configMap:
## Additional labels for the ConfigMap resource
additionalLabels: {}
## Additional annotations for the ConfigMap resource
additionalAnnotations: {}
## Additional configuration for the Secret resource
secret:
## Additional labels for the Secret resource
additionalLabels: {}
## Additional annotations for the Secret resource
additionalAnnotations: {}
## Additional configuration for the Deployment resource
deployment:
## Number of instances of Roundcube to be started
replicas: 1
## Deployment strategy (see https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/#DeploymentSpec)
strategy:
## Deployment strategy type
type: RollingUpdate
## Roundcube container's HTTP port
port: 80
## Define additional ImagePullSecrets.
imagePullSecrets:
## You can refer to existing ImagePullSecrets as follows:
# - name: my-custom-secret
## Additional labels for the Deployment resource
additionalLabels: {}
## Additional annotations for the Deployment resource
additionalAnnotations: {}
## Pod affinity rule preset (see https://docs.bitnami.com/tutorials/assign-pod-nodes-helm-affinity-rules/)
podAffinityPreset: ""
## Pod anti affinity rule preset (see https://docs.bitnami.com/tutorials/assign-pod-nodes-helm-affinity-rules/)
podAntiAffinityPreset: hard
## Custom affinity rules (overrides podAffinityPreset and podAntiAffinityPreset)
affinity: {}
## Configure resource allocation and limits
# resources:
# requests:
# memory: "256Mi"
# cpu: "1000m"
# limits:
# memory: "256Mi"
# cpu: "1000m"
## Allow defining custom actions to be executed before deploying main container (e.g.: to prepare database)
initContainers: []
# - name: init
# image: alpine:3
# command: [sh, -c]
# args:
# - echo "Hello, world!" > /mnt/extra-storage/test
# volumeMounts:
# - name: extra-storage
# mountPath: /mnt/extra-storage
## Additional volume mounts for the Deployment resource
additionalVolumeMounts: []
# - name: extra-storage
# mountPath: /mnt/extra-storage
## Additional volumes for the Deployment resource
additionalVolumes: []
# - name: extra-storage
# emptyDir: {}
## Additional environment variables for the Deployment resource
additionalEnv: []
# - name: MY_ENV_VAR
# value: value
containers:
nginx:
image: nginx:1.25.4
imagePullPolicy: IfNotPresent
## Additional configuration for the Service resource
service:
## Service port for HTTP
port: 80
## Additional labels for the Service resource
additionalLabels: {}
## Additional annotations for the Service resource
additionalAnnotations: {}
## https://kubernetes.io/docs/concepts/services-networking/dual-stack/
ipFamilyPolicy: PreferDualStack
## Ingress resource configuration
ingress:
## Enable Ingress resource
enabled: true
## Whether deploy https or plain http ingress
useTLS: true
class:
host: roundcube.allarddcs.nl
path: "/"
pathType: ImplementationSpecific
Classname: nginx
## TLS settings
tls:
## Configure secret name, e.g., to re-use an existing secret
secretName:
## Additional labels for the Ingress resource
additionalLabels: {}
## Additional annotations for the Ingress resource
additionalAnnotations: {}
## PodDisruptionBudget configuration
pdb:
## Enable PodDisruptionBudget
enabled: true
minAvailable: 1
maxUnavailable: ""
## Additional labels for the PodDisruptionBudget resource
additionalLabels: {}
## Additional annotations for the PodDisruptionBudget resource
additionalAnnotations: {}
## NetworkPolicy configuration
## A NetworkPolicy can prevent unauthorized access to remote hosts on network level.
## By default, if enabled, the NetworkPolicy allows to connect to any IP with the IMAP and SMTP ports configured above.
## This also applies for the managesieve plugin, if enabled.
## More information on network policies: https://kubernetes.io/docs/concepts/services-networking/network-policies/
networkPolicy:
## Create a NetworkPolicy resource
enabled: false
## Egress policies for Roundcube environment
egress:
## IMAP related Egress policies
imap:
## Explicitly configure allowed peers for the IMAP connection.
## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#networkpolicypeer-v1-networking-k8s-io for all possible options.
peers:
- ipBlock:
cidr: 0.0.0.0/0
## SMTP related Egress policies
smtp:
## Explicitly configure allowed peers for the SMTP connection.
## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#networkpolicypeer-v1-networking-k8s-io for all possible options.
peers:
- ipBlock:
cidr: 0.0.0.0/0
## managesieve related Egress policies
managesieve:
## Explicitly configure allowed peers for the managesieve connection. Will only be used if the managesieve plugin is enabled.
## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#networkpolicypeer-v1-networking-k8s-io for all possible options.
peers:
- ipBlock:
cidr: 0.0.0.0/0
## Add additional, custom egress rules.
## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#networkpolicyegressrule-v1-networking-k8s-io.
additionalRules: []
## Example rule for a zalando postgres cluster called "shared-postgres" in namespace "postgres"
# - to:
# - namespaceSelector:
# matchLabels:
# kubernetes.io/metadata.name: postgres
# - podSelector:
# matchLabels:
# cluster-name: shared-postgres
# ports:
# - port: 5432
# protocol: TCP
## Additional labels for the NetworkPolicy
additionalLabels: {}
## Additional annotations for the NetworkPolicy
additionalAnnotations: {}

36
lp/traefik/README.md Normal file
View File

@@ -0,0 +1,36 @@
1) install traefik via the helm chart:
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
kubectl create namespace traefik
2) create persistent storage:
kubectl apply -f traefik-pvc
When enabling persistence for certificates, permissions on acme.json can be
lost when Traefik restarts. You can ensure correct permissions with an
initContainer. See https://github.com/traefik/traefik-helm-chart/blob/master/EXAMPLES.md#use-traefik-native-lets-encrypt-integration-without-cert-manager
3) install:
helm install traefik traefik/traefik -f values.yaml -n traefik
CHECK THAT PORT FORWARDING OF PORTS 80 AND 443 FROM THE ROUTER TO THE LOADBALANCER IS CONFIGURED CORRECTLY!
RESTART THE KPN ROUTER AFTER CHANGING THIS! (see the quick check at the end of this file)
4) harden TLS (tlsoption.yaml comes from the whoami example)
kubectl apply -f tlsoption.yaml
7) make the dashboard accessible (dashboard.yaml comes from traefik's own helm documentation)
kubectl apply -f ingressroute-dashboard.yaml -n traefik
#migration:
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/content/reference/dynamic-configuration/kubernetes-crd-rbac.yml
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/content/reference/dynamic-configuration/kubernetes-crd-definition-v1.yml
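quick check for step 3 (sketch): confirm the traefik LoadBalancer service actually exposes ports 80 and 443 before touching the router:
kubectl get svc traefik -n traefik
kubectl get pods -n traefik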

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: lp-traefik
title: Traefik (lp)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

View File

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: traefik
rules:
- apiGroups: ["traefik.io"]
resources: ["ingressroutes", "ingressroutesstatus"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["services", "endpoints", "pods", "secrets"]
verbs: ["get", "list", "watch"]

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: traefik-ingressroute
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik
subjects:
- kind: ServiceAccount
name: traefik
namespace: traefik

View File

@@ -0,0 +1,10 @@
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: forwarded-headers
spec:
headers:
customResponseHeaders:
X-Forwarded-Proto: "https"
X-Forwarded-For: "{remote}"
X-Real-IP: "{remote}"

View File

@@ -0,0 +1,14 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
namespace: traefik
spec:
entryPoints:
- websecure
routes:
- match: Host(`traefik-lp.allarddcs.nl`) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`))
kind: Rule
services:
- name: api@internal
kind: TraefikService

View File

@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
name: dev-http
namespace: traefik
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: dev-http
namespace: traefik
subsets:
- addresses:
- ip: 192.168.2.181
ports:
- port: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: dev-http
namespace: traefik
spec:
entryPoints:
- web
routes:
- match: HostRegexp(`^[a-z0-9]+-dev\.allarddcs\.nl$`)
kind: Rule
services:
- name: dev-http
port: 80

View File

@@ -0,0 +1,41 @@
apiVersion: v1
kind: Service
metadata:
name: gitea-http
namespace: traefik
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: gitea-http
namespace: traefik
subsets:
- addresses:
- ip: 192.168.2.181
ports:
- port: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: gitea-http
namespace: traefik
spec:
entryPoints:
- web
routes:
- match: Host(`gitea-dev.allarddcs.nl`)
kind: Rule
services:
- name: gitea-http
port: 80
- match: Host(`gitea-dev.allarddcs.nl`) && PathPrefix(`/.well-known/acme-challenge/`)
kind: Rule
services:
- name: gitea-http
port: 80

View File

@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
name: odroid-http
namespace: traefik
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: odroid-http
namespace: traefik
subsets:
- addresses:
- ip: 192.168.2.230
ports:
- port: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: odroid-http
namespace: traefik
spec:
entryPoints:
- web
routes:
- match: HostRegexp(`^[a-z0-9]+-odroid\.allarddcs\.nl$`)
kind: Rule
services:
- name: odroid-http
port: 80

View File

@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
name: prod-http
namespace: traefik
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: prod-http
namespace: traefik
subsets:
- addresses:
- ip: 192.168.2.191
ports:
- port: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: prod-http
namespace: traefik
spec:
entryPoints:
- web
routes:
- match: HostRegexp(`^[a-z0-9]+-prod\.allarddcs\.nl$`)
kind: Rule
services:
- name: prod-http
port: 80

View File

@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
name: riscv-http
namespace: traefik
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
name: riscv-http
namespace: traefik
subsets:
- addresses:
- ip: 192.168.2.113
ports:
- port: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: riscv-http
namespace: traefik
spec:
entryPoints:
- web
routes:
- match: HostRegexp(`^[a-z0-9]+-riscv\.allarddcs\.nl$`)
kind: Rule
services:
- name: riscv-http
port: 80

Some files were not shown because too many files have changed in this diff Show More