commit 34c0849dca
parent af8363abaa
Author: allard
Date: 2025-12-03 06:17:44 +01:00
30 changed files with 3361 additions and 934 deletions

1 dev/argocd/mailtest.yaml Normal file

@@ -0,0 +1 @@
test

50 dev/argocd/values.old Normal file

@@ -0,0 +1,50 @@
ingress:
server:
enabled: true
ingressClassName: traefik
hosts:
- host: argocd-dev.allarddcs.nl
paths:
- "/"
tls:
- hosts:
- argocd-dev.allarddcs.nl
secretName: argocd-tls-cert
configs:
params:
# disable insecure (HTTP)
server.insecure: "false"
configs:
resource.customizations: |
rbac.authorization.k8s.io/ClusterRole:
ignoreDifferences: |
jsonPointers:
- /metadata/annotations/argocd.argoproj.io~1tracking-id
rbac.authorization.k8s.io/ClusterRoleBinding:
ignoreDifferences: |
jsonPointers:
- /metadata/annotations/argocd.argoproj.io~1tracking-id
server:
tls:
enabled: true
# name of the TLS secret (created via cert-manager)
secretName: argocd-tls-cert
repoServer:
extraArgs:
- --parallelismlimit=1
readinessProbe:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 2
failureThreshold: 10
livenessProbe:
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 2
failureThreshold: 10
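The TLS secret referenced above (argocd-tls-cert) is created via cert-manager. A minimal sketch of the matching Certificate, assuming the ClusterIssuer is named letsencrypt as in the other manifests in this commit and that Argo CD runs in the argocd namespace:

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: argocd-tls-cert
  namespace: argocd   # assumption: the Argo CD install namespace
spec:
  secretName: argocd-tls-cert
  dnsNames:
    - argocd-dev.allarddcs.nl
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt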


@@ -48,3 +48,76 @@ repoServer:
periodSeconds: 10
timeoutSeconds: 2
failureThreshold: 10
ingress:
server:
enabled: true
ingressClassName: traefik
hosts:
- host: argocd-dev.allarddcs.nl
paths:
- "/"
tls:
- hosts:
- argocd-dev.allarddcs.nl
secretName: argocd-tls-cert
configs:
params:
server.insecure: "false"
resource.customizations: |
rbac.authorization.k8s.io/ClusterRole:
ignoreDifferences: |
jsonPointers:
- /metadata/annotations/argocd.argoproj.io~1tracking-id
rbac.authorization.k8s.io/ClusterRoleBinding:
ignoreDifferences: |
jsonPointers:
- /metadata/annotations/argocd.argoproj.io~1tracking-id
server:
tls:
enabled: true
secretName: argocd-tls-cert
repoServer:
extraArgs:
- --parallelismlimit=1
readinessProbe:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 2
failureThreshold: 10
livenessProbe:
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 2
failureThreshold: 10
# ---------------- Notifications ----------------
notifications:
enabled: true
config:
# SMTP email service
service.email.smtp: |
username: argocd@allarddcs.nl
password: Argocd01@
host: smtp.allarddcs.nl
port: 587
from: argocd@allarddcs.nl
# Optional template for sync failure
template.app-sync-failed: |
subject: "ArgoCD: Application {{.app.metadata.name}} sync failed" # quoted so the embedded colon parses
body: |
Application {{.app.metadata.name}} failed to sync.
Status: {{.app.status.sync.status}}
Health: {{.app.status.health.status}}
# Global subscription for all apps
subscriptions:
- recipients:
- admin@allarddcs.nl
triggers:
- app-sync-failed
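The subscription above fires on an app-sync-failed trigger, but only the template of that name is defined here. A hedged sketch of the missing trigger definition, written in the same config block style used above (the exact key layout depends on the chart version):

notifications:
  config:
    trigger.app-sync-failed: |
      - when: app.status.operationState.phase in ['Error', 'Failed']
        send: [app-sync-failed]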


@@ -1,2 +1,18 @@
#Installation:
helm repo add zabbix-community https://zabbix-community.github.io/helm-zabbix
helm repo update
helm install zabbix zabbix-community/zabbix \
-f ~/zabbix_values.yaml \
-n monitoring
Default user: Admin
Default password: zabbix
=====
Changed to:
- user: admin
- password: Zabbix01@
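The install command above reads ~/zabbix_values.yaml. A minimal sketch of such a file, reusing the external PostgreSQL settings that appear in the chart defaults further down (credentials are illustrative):

postgresAccess:
  host: postgres14.postgres
  port: "5432"
  user: zabbix
  password: zabbix
  database: zabbix
zabbixAgent:
  enabled: true
zabbixWeb:
  enabled: true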


@@ -1,18 +0,0 @@
zabbixAgent:
enabled: true
env:
- name: ZBX_PASSIVE_SERVERS
value: 0.0.0.0/0
zabbixProxy:
enabled: true
env:
- name: ZBX_HOSTNAME
value: zabbix-proxy
- name: ZBX_SERVER_HOST
value: "zabbix-zabbix-server.monitoring:10051"
- name: ZBX_CACHESIZE
value: 256M
- name: ZBX_VALUECACHESIZE
value: 128M
- name: ZBX_CONFIGFREQUENCY
value: "60" # quoted: Kubernetes env values must be strings


@@ -1,540 +0,0 @@
# Default values for zabbix.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Zabbix components (server, agent, web frontend, ...) image tag to use.
#This helm chart is compatible with non-LTS versions of Zabbix, which include important changes and functionality.
#By default, however, this helm chart will install the latest LTS version (example: 6.0.x).
#See more info on the [Zabbix Life Cycle & Release Policy](https://www.zabbix.com/life_cycle_and_release_policy) page.
#If you want to use a non-LTS version (example: 6.2.x), you have to set this yourself. You can change the version
#here or override it in each component (example: zabbixserver.image.tag, etc).
zabbixImageTag: ubuntu-6.0.20
# **Zabbix PostgreSQL access / credentials** configuration
# with this dict you can set unified PostgreSQL access credentials, IP and so on for both Zabbix Server and the Zabbix web frontend
# you can either choose to keep all of this in one named (preexisting) secret or set the values one by one with vars
# whatever is set here overrides the credential settings within the "zabbixServer" and "zabbixWeb" sections.
postgresAccess:
# under this section you can configure PostgreSQL access and credentials centrally for the entire helm chart.
# All relevant components installed by this chart will respect it: zabbixServer, zabbixWeb and postgresql (if enabled)
#
# USING ONE SECRET CONTAINING ALL DB RELEVANT SETTINGS
# PostgreSQL access details all in one existing secret (matches the structure of secrets the CrunchyData pgo operator generates)
# if this option is chosen the below listed settings are being ignored
# the secret must contain the following keys:
# * host
# * port
# * user
# * password
# * database
# -- Whether to use the unified PostgreSQL access secret
useUnifiedSecret: false
# -- Name of one secret for unified configuration of PostgreSQL access
unifiedSecretName: zabbixdb-pguser-zabbix
# -- automatically create secret if not already present (works only in combination with postgresql.enabled=true)
unifiedSecretAutoCreate: true
#
# If you do NOT want to use one unified secret for all settings, you can still set the credentials manually here.
# These settings will be used for all components of this chart where it makes sense (zabbix server, postgresql,
# web frontend, ...)
# -- Address of database host - ignored if postgresql.enabled=true
host: "postgres14.postgres"
# -- Port of database host - ignored if postgresql.enabled=true
port: "5432"
# -- User of database
user: "zabbix"
# -- Name of a secret used for Postgres Password, if set, it overrules the POSTGRES_PASSWORD value
#passwordSecret: mysecret
# -- Key of the secret used for Postgres Password, requires POSTGRES_PASSWORD_SECRET, defaults to password
#passwordSecretKey: "password"
# -- Password of database - ignored if passwordSecret is set
password: "zabbix"
# -- Name of database
database: "zabbix"
# **Zabbix Server** configurations
zabbixServer:
# -- Enables use of **Zabbix Server**
enabled: true
# -- Number of replicas of ``zabbixServer`` module
replicaCount: 1
# -- set permissive podAntiAffinity to spread replicas over cluster nodes if replicaCount>1
podAntiAffinity: true
# -- optionally set true to open a port directly on the node where Zabbix Server runs
hostPort: false
# -- optionally set hostIP different from 0.0.0.0 to open the port only on this IP
hostIP: 0.0.0.0
# -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers)
resources: {}
image:
# -- Zabbix Server Docker image name
repository: zabbix/zabbix-server-pgsql
# -- Zabbix Server Docker image tag, if you want to override zabbixImageTag
tag: null
# -- Pull policy of Docker image
pullPolicy: IfNotPresent
# -- List of dockerconfig secrets names to use when pulling images
pullSecrets: []
# -- automatically clean orphaned ha nodes from ha_nodes db table
haNodesAutoClean:
enabled: true
image:
# -- Postgresql Docker image name: choose one of "postgres" or "timescale/timescaledb"
repository: postgres
# -- Tag of Docker image of Postgresql server: choose "15" for postgres or "2.10.3-pg15" for timescaledb
# (Zabbix supports TimescaleDB 2.0.1-2.10.x. More info: https://www.zabbix.com/documentation/6.0/en/manual/installation/requirements)
# Added support for PostgreSQL versions 15.x since Zabbix 6.0.10
tag: 15
pullPolicy: IfNotPresent
pullSecrets: []
schedule: "0 1 * * *"
deleteOlderThanSeconds: 3600
# -- Extra environment variables. A list of additional environment variables.
extraEnv: []
# -- additional volumeMounts to the cronjob hanodes autoclean
extraVolumeMounts: []
# -- additional containers to start within the cronjob hanodes autoclean
extraContainers: []
# -- additional init containers to start within the cronjob hanodes autoclean
extraInitContainers: []
# -- additional volumes to make available to the cronjob hanodes autoclean
extraVolumes: []
# -- additional specifications to the cronjob hanodes autoclean
extraPodSpecs: {}
service:
# -- Type of service in Kubernetes cluster
type: ClusterIP
# -- Cluster IP for Zabbix Server
# -- externalTrafficPolicy for Zabbix Server. "Local" to preserve sender's IP address. Please note that this might not work on multi-node clusters, depending on your network settings.
#externalTrafficPolicy: Local
# -- IPs if using service type LoadBalancer
externalIPs: []
loadBalancerIP: ""
clusterIP:
# -- Port of service in Kubernetes cluster
port: 10051
# -- NodePort of service on each node
nodePort: 31051
# -- Annotations for the zabbix-server service
annotations: {}
# metallb.universe.tf/address-pool: production-public-ips
# -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/server-pgsql/alpine#environment-variables. See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml
extraEnv: []
#- name: ENABLE_TIMESCALEDB
# value: "true"
# -- annotations to add to the deployment
deploymentAnnotations: {}
# -- annotations to add to the containers
containerAnnotations: {}
# -- additional volumeMounts to the Zabbix Server container
extraVolumeMounts: []
# -- additional containers to start within the Zabbix Server pod
extraContainers: []
# -- additional init containers to start within the Zabbix Server pod
extraInitContainers: []
# -- additional volumes to make available to the Zabbix Server pod
extraVolumes: []
# -- additional specifications to the Zabbix Server pod
extraPodSpecs: {}
# **PostgreSQL** configurations
postgresql:
# -- Create a database using Postgresql
enabled: false
image:
# -- Postgresql Docker image name: choose one of "postgres" or "timescale/timescaledb"
repository: postgres
# -- Tag of Docker image of Postgresql server: choose "15" for postgres or "2.11.1-pg15" for timescaledb
# (Zabbix supports TimescaleDB 2.0.1-2.11.x. More info: https://www.zabbix.com/documentation/6.0/en/manual/installation/requirements)
# Added support for PostgreSQL versions 15.x since Zabbix 6.0.10
tag: 15
# -- Pull policy of Docker image
pullPolicy: IfNotPresent
# -- List of dockerconfig secrets names to use when pulling images
pullSecrets: []
persistence:
# -- whether to enable persistent storage for the postgres container or not
enabled: false
# -- existing persistent volume claim name to be used to store postgres data
existingClaimName: false
# -- size of the PVC to be automatically generated
storageSize: 5Gi
# -- storage PVC storageclass to use
#storageClass: my-storage-class
service:
# -- Type of service in Kubernetes cluster
type: ClusterIP
# -- Cluster IP for Zabbix Server
clusterIP:
# -- Port of service in Kubernetes cluster
port: 5432
# -- Annotations for the zabbix-server service
annotations: {}
# metallb.universe.tf/address-pool: production-public-ips
# -- Extra Postgresql runtime parameters ("-c" options)
extraRuntimeParameters:
max_connections: 50
# -- Extra environment variables. A list of additional environment variables.
extraEnv: []
# -- annotations to add to the statefulset
statefulSetAnnotations: {}
# -- annotations to add to the containers
containerAnnotations: {}
# -- additional volumeMounts to the postgresql container
extraVolumeMounts: []
# -- additional containers to start within the postgresql pod
extraContainers: []
# -- additional init containers to start within the postgresql pod
extraInitContainers: []
# -- additional volumes to make available to the postgresql pod
extraVolumes: []
# -- additional specifications to the postgresql pod
extraPodSpecs: {}
# **Zabbix Proxy** configurations
zabbixProxy:
# -- Enables use of **Zabbix Proxy**
enabled: false
# -- Number of replicas of ``zabbixProxy`` module
replicaCount: 1
# -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers)
resources: {}
image:
# -- Zabbix Proxy Docker image name
repository: zabbix/zabbix-proxy-sqlite3
# -- Zabbix Proxy Docker image tag, if you want to override zabbixImageTag
tag: null
# -- Pull policy of Docker image
pullPolicy: IfNotPresent
# -- List of dockerconfig secrets names to use when pulling images
pullSecrets: []
# -- The variable switches the Zabbix Proxy mode. By default, the value is 0 - active proxy. Allowed values are 0 and 1.
ZBX_PROXYMODE: 0
# -- Zabbix Proxy hostname (unique, case-sensitive)
ZBX_HOSTNAME: zabbix-proxy
# -- Zabbix Server host
ZBX_SERVER_HOST: zabbix-zabbix-server
# -- Zabbix Server port
ZBX_SERVER_PORT: 10051
# ZBX_LOADMODULE: dummy1.so,dummy2.so # A list of comma-separated loadable Zabbix modules. It works with the volume /var/lib/zabbix/modules.
# The variable is used to specify debug level, from 0 to 5
ZBX_DEBUGLEVEL: 4
# The variable is used to specify timeout for processing checks. By default, value is 4.
ZBX_TIMEOUT: 4
# -- The variable enables communication with Zabbix Java Gateway to collect Java-related checks. By default, the value is false.
ZBX_JAVAGATEWAY_ENABLE: false
# -- Cache size
ZBX_VMWARECACHESIZE: 128M
service:
# -- Type of service for Zabbix Proxy
type: ClusterIP
# -- Cluster IP for Zabbix Proxy
clusterIP:
# -- Port to expose service
port: 10051
# -- Annotations for the zabbix-proxy service
annotations: {}
# metallb.universe.tf/address-pool: production-public-ips
# -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/proxy-sqlite3/alpine#environment-variables. See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml
extraEnv: []
# -- annotations to add to the statefulset
statefulSetAnnotations: {}
# -- annotations to add to the containers
containerAnnotations: {}
# -- additional volumeMounts to the Zabbix Proxy container
extraVolumeMounts: []
# -- additional containers to start within the Zabbix Proxy pod
extraContainers: []
# -- additional init containers to start within the Zabbix Proxy pod
extraInitContainers: []
# -- additional volumes to make available to the Zabbix Proxy pod
extraVolumes: []
# -- additional specifications to the Zabbix Proxy pod
extraPodSpecs: {}
# -- extra volumeClaimTemplate for zabbixProxy statefulset
extraVolumeClaimTemplate: []
# **Zabbix Agent** configurations
zabbixAgent:
# -- Enables use of **Zabbix Agent**
enabled: true
# -- This is the default mode. Zabbix agent will run as a sidecar in the zabbix-server and zabbix-proxy pods. Disable this mode if you want to run zabbix-agent as a DaemonSet
runAsSidecar: true
# -- Enable this mode if you want to run zabbix-agent as daemonSet. The 'zabbixAgent.runAsSidecar' option must be false.
runAsDaemonSet: false
# -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers)
resources: {}
image:
# -- Zabbix Agent Docker image name. Can use zabbix/zabbix-agent or zabbix/zabbix-agent2
repository: zabbix/zabbix-agent2
# -- Zabbix Agent Docker image tag, if you want to override zabbixImageTag
tag: null
# -- Pull policy of Docker image
pullPolicy: IfNotPresent
# -- List of dockerconfig secrets names to use when pulling images
pullSecrets: []
# -- Zabbix Server host
ZBX_SERVER_HOST: 0.0.0.0/0
# -- Zabbix Server port
ZBX_SERVER_PORT: 10051
# -- This boolean variable (true or false) enables or disables the passive checks feature. By default, the value is true
ZBX_PASSIVE_ALLOW: true
# -- The variable is a comma-separated list of allowed Zabbix Server or proxy hosts for connections to the Zabbix Agent container. Example: Server=127.0.0.1,192.168.1.0/24,::1,2001:db8::/32,zabbix.example.com
#ZBX_PASSIVESERVERS: ''
# -- This boolean variable (true or false) enables or disables the active checks feature
ZBX_ACTIVE_ALLOW: false
# -- The variable is a comma-separated list of allowed Zabbix Server or proxy hosts for connections to the Zabbix Agent container. You may specify a port.
#ZBX_ACTIVESERVERS: ''
# -- The variable is a list of comma-separated loadable Zabbix modules. It works with the volume /var/lib/zabbix/modules.
#ZBX_LOADMODULE: dummy1.so,dummy2.so
# -- The variable is used to specify debug level, from 0 to 5
ZBX_DEBUGLEVEL: 3
# -- The variable is used to specify timeout for processing checks. By default, value is 4.
ZBX_TIMEOUT: 4
service:
# -- Type of service for Zabbix Agent
type: ClusterIP
# -- Cluster IP for Zabbix Agent
clusterIP:
# -- Port to expose service
port: 10050
# -- externalTrafficPolicy for Zabbix Agent service. "Local" to preserve sender's IP address. Please note that this might not work on multi-node clusters, depending on your network settings.
#externalTrafficPolicy: Local
listenOnAllInterfaces: true
# -- NodePort port to allocate (only if service.type = NodePort)
#nodePort: 31050
# -- Annotations for the zabbix-agent service
annotations: {}
# metallb.universe.tf/address-pool: production-public-ips
# -- If true, agent pods mounts host / at /host/root
hostRootFsMount: true
# -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/agent2/alpine#environment-variables. See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml
extraEnv: []
# -- additional volumeMounts to the zabbix Agent container
extraVolumeMounts: []
# -- annotations to add to the daemonSet
daemonSetAnnotations: {}
# -- annotations to add to the containers
containerAnnotations: {}
# -- additional containers to start within the Zabbix Agent pod
extraContainers: []
# -- additional init containers to start within the Zabbix Agent pod
extraInitContainers: []
# -- additional volumes to make available to the Zabbix Agent pod
extraVolumes: []
# -- additional specifications to the Zabbix Agent pod
extraPodSpecs: {}
# **Zabbix Web** configurations
zabbixWeb:
# -- Enables use of **Zabbix Web**
enabled: true
# -- Number of replicas of ``zabbixWeb`` module
replicaCount: 1
# -- set permissive podAntiAffinity to spread replicas over cluster nodes if replicaCount>1
podAntiAffinity: true
# -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers)
resources: {}
image:
# -- Zabbix Web Docker image name
repository: zabbix/zabbix-web-nginx-pgsql
# -- Zabbix Web Docker image tag, if you want to override zabbixImageTag
tag: null
# -- Pull policy of Docker image
pullPolicy: IfNotPresent
# -- List of dockerconfig secrets names to use when pulling images
pullSecrets: []
# -- Certificate containing certificates for SAML configuration
#samlCertsSecretName: zabbix-web-samlcerts
service:
# -- Type of service for Zabbix Web
type: ClusterIP
# -- externalTrafficPolicy for Zabbix Web. "Local" to preserve sender's IP address. Please note that this might not work on multi-node clusters, depending on your network settings.
#externalTrafficPolicy: Local
# -- IPs if using service type LoadBalancer
externalIPs: []
loadBalancerIP: ""
# -- Cluster IP for Zabbix Web
clusterIP:
# -- Port to expose service
port: 80
# -- NodePort port to allocate (only if service.type = NodePort)
#nodePort: 31080
# -- Annotations for the Zabbix Web
annotations: {}
# metallb.universe.tf/address-pool: production-public-ips
# -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/web-apache-pgsql/alpine#environment-variables. See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml
extraEnv: []
#- name: ZBX_SSO_SETTINGS
# value: '{"baseurl": "https://zabbix.example.com"}'
#- name: ZBX_SERVER_NAME
# value: Demo Zabbix
# -- annotations to add to the deployment
deploymentAnnotations: {}
# -- annotations to add to the containers
containerAnnotations: {}
# -- additional volumeMounts to the Zabbix Web container
extraVolumeMounts: []
# -- additional containers to start within the Zabbix Web pod
extraContainers: []
# -- additional init containers to start within the Zabbix Web pod
extraInitContainers: []
# -- additional volumes to make available to the Zabbix Web pod
extraVolumes: []
# -- additional specifications to the Zabbix Web pod
extraPodSpecs: {}
livenessProbe:
# -- Path of health check of application
path: /
# -- Number of seconds after the container has started before the liveness probe is initiated
initialDelaySeconds: 30
# -- Specifies that the kubelet should perform a liveness probe every N seconds
periodSeconds: 10
# -- Number of seconds after which the probe times out
timeoutSeconds: 5
# -- When a probe fails, Kubernetes will try failureThreshold times before giving up. Giving up in case of liveness probe means restarting the container. In case of readiness probe the Pod will be marked Unready
failureThreshold: 6
# -- Minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
readinessProbe:
# -- Path of health check of application
path: /
# -- Number of seconds after the container has started before the readiness probe is initiated
initialDelaySeconds: 5
# -- Specifies that the kubelet should perform a readiness probe every N seconds
periodSeconds: 10
# -- Number of seconds after which the probe times out
timeoutSeconds: 5
# -- When a probe fails, Kubernetes will try failureThreshold times before giving up. Giving up in case of liveness probe means restarting the container. In case of readiness probe the Pod will be marked Unready
failureThreshold: 6
# -- Minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# **Zabbix Web Service** configurations
zabbixWebService:
# -- Enables use of **Zabbix Web Service**
enabled: true
# -- Number of replicas of ``zabbixWebService`` module
replicaCount: 1
# -- set permissive podAntiAffinity to spread replicas over cluster nodes if replicaCount>1
podAntiAffinity: true
# -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers)
resources: {}
image:
# -- Zabbix Webservice Docker image name
repository: zabbix/zabbix-web-service
# -- Zabbix Webservice Docker image tag, if you want to override zabbixImageTag
tag: null
# -- Pull policy of Docker image
pullPolicy: IfNotPresent
# -- List of dockerconfig secrets names to use when pulling images
pullSecrets: []
# -- set the IgnoreURLCertErrors configuration setting of Zabbix Web Service
#ignoreURLCertErrors=1
service:
# -- Type of service for Zabbix Web
type: ClusterIP
# -- Cluster IP for Zabbix Web
clusterIP:
# -- Port to expose service
port: 10053
# -- Annotations for the Zabbix Web Service
annotations: {}
# metallb.universe.tf/address-pool: production-public-ips
# -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/web-service/alpine#environment-variables. See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml
extraEnv: []
# -- annotations to add to the deployment
deploymentAnnotations: {}
# -- annotations to add to the containers
containerAnnotations: {}
# -- additional volumeMounts to the Zabbix Web Service container
extraVolumeMounts: []
# -- additional containers to start within the Zabbix Web Service pod
extraContainers: []
# -- additional init containers to start within the Zabbix Web Service pod
extraInitContainers: []
# -- additional volumes to make available to the Zabbix Web Service pod
extraVolumes: []
# -- additional specifications to the Zabbix Web Service pod
extraPodSpecs: {}
# Ingress configurations
ingress:
# -- Enables Ingress
enabled: false
# -- Ingress annotations
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# -- Ingress hosts
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
# -- Ingress TLS configuration
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# -- pathType is only for k8s >= 1.18
pathType: Prefix
# Ingress CRD object for the Traefik Ingresscontroller
ingressRoute:
# -- Enables Traefik IngressRoute
enabled: false
# -- IngressRoute annotations
annotations: {}
# -- Ingressroute entrypoints
entryPoints:
- websecure
# -- Ingressroute host name
hostName: chart-example.local
# -- IngressRoute TLS configuration
#tls:
# certResolver: myresolver
route:
# -- Enables Route object for Openshift
enabled: false
# -- Host Name for the route. Can be left empty
hostName: chart-example.local
# -- Openshift Route wildcardPolicy
#wildcardPolicy:
# -- Openshift Route TLS settings
tls:
termination: edge
#insecureEdgeTerminationPolicy: Redirect
# -- Openshift Route extra annotations
annotations: {}
# -- nodeSelector configurations. Reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
nodeSelector: {}
# -- Tolerations configurations. Reference: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
# -- Affinity configurations. Reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
affinity: {}
# -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext: {}
karpenter:
# -- Enables support for the Karpenter provisioner. Reference: https://karpenter.sh/. Tested only using EKS cluster 1.23 in AWS with Karpenter 0.19.2.
enabled: false
# -- Name of the cluster. Replace CHANGE_HERE with the EKS cluster name if you want to use Karpenter.
clusterName: "CHANGE_HERE"
# -- Discovery tag with the name of the cluster used by Karpenter. Replace CHANGE_HERE with the EKS cluster name if you want to use Karpenter. The cluster name, security group and subnets must have this tag.
tag: "karpenter.sh/discovery/CHANGE_HERE: CHANGE_HERE"
# -- Resource limits constrain the total size of the cluster. Limits prevent Karpenter from creating new instances once the limit is exceeded.
limits:
resources:
cpu: "1000"
memory: 1000Gi
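The unifiedSecretName option above expects one secret holding the keys host, port, user, password and database, as listed in the comments. A hedged sketch of such a secret with illustrative values (namespace taken from the helm install command above):

apiVersion: v1
kind: Secret
metadata:
  name: zabbixdb-pguser-zabbix
  namespace: monitoring   # assumption: matches "helm install ... -n monitoring"
type: Opaque
stringData:
  host: postgres14.postgres
  port: "5432"
  user: zabbix
  password: zabbix   # illustrative
  database: zabbix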

1099 dev/zabbix/values.bak Normal file (diff suppressed: too large)

1086 dev/zabbix/values.org Normal file (diff suppressed: too large)

1021 dev/zabbix/values.yaml Normal file (diff suppressed: too large)


@@ -1,47 +0,0 @@
#register admin:
kubectl exec -it matrix-644984f6b7-d7jcp -n matrix -- register_new_matrix_user http://localhost:8008 -u admin -p Matrix01@ \
-a -k f0hE.OTU8UXQ44yIHPWtO+8CKhM-b:QZNngk_qhE8EvgmP-3h@
#register regular user:
kubectl exec -it matrix-644984f6b7-d7jcp -n matrix -- register_new_matrix_user http://localhost:8008 -u diederick -p Matrix01@ \
--no-admin -k f0hE.OTU8UXQ44yIHPWtO+8CKhM-b:QZNngk_qhE8EvgmP-3h@
#general:
usage: register_new_matrix_user [-h] [-u USER] [--exists-ok] [-p PASSWORD | --password-file PASSWORD_FILE] [-t USER_TYPE] [-a | --no-admin] (-c CONFIG | -k SHARED_SECRET)
[server_url]
Used to register new users with a given homeserver when registration has been disabled. The homeserver must be configured with
the 'registration_shared_secret' option set.
positional arguments:
server_url URL to use to talk to the homeserver. By default, tries to find a suitable URL from the configuration
file. Otherwise, defaults to 'http://localhost:8008'.
options:
-h, --help show this help message and exit
-u USER, --user USER Local part of the new user. Will prompt if omitted.
--exists-ok Do not fail if user already exists.
-p PASSWORD, --password PASSWORD
New password for user. Will prompt for a password if this flag and `--password-file` are both omitted.
--password-file PASSWORD_FILE
File containing the new password for user. If set, will override `--password`.
-t USER_TYPE, --user_type USER_TYPE
User type as specified in synapse.api.constants.UserTypes
-a, --admin Register new user as an admin. Will prompt if --no-admin is not set either.
--no-admin Register new user as a regular user. Will prompt if --admin is not set either.
-c CONFIG, --config CONFIG
Path to server config file. Used to read in shared secret.
-k SHARED_SECRET, --shared-secret SHARED_SECRET
Shared secret as defined in server config file.
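As the help text says, the homeserver must have registration_shared_secret set. A minimal sketch of the relevant homeserver.yaml fragment, assuming the secret passed with -k above is the one configured:

# homeserver.yaml (fragment)
enable_registration: false
registration_shared_secret: "f0hE.OTU8UXQ44yIHPWtO+8CKhM-b:QZNngk_qhE8EvgmP-3h@"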
#coturn:
check UDP and TCP connectivity:
nc -zvu coturn-prod.allarddcs.nl 3478
nc -zv coturn-prod.allarddcs.nl 3478
nc -zv coturn-prod.allarddcs.nl 5349


@@ -1,12 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: coturn-cert
namespace: matrix
spec:
secretName: coturn-cert
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- "matrix-prod.allarddcs.nl"


@@ -1,16 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: coturn-prod.allarddcs.nl-tls
namespace: matrix
spec:
dnsNames:
- coturn-prod.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: coturn-prod.allarddcs.nl-tls
usages:
- digital signature
- key encipherment


@@ -1,153 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: coturn
namespace: matrix
spec:
replicas: 1
selector:
matchLabels:
app: coturn
template:
metadata:
labels:
app: coturn
spec:
containers:
- name: coturn
image: coturn/coturn:latest
ports:
- name: turn-udp
containerPort: 3478
protocol: UDP
- name: turn-tcp
containerPort: 3478
protocol: TCP
- name: turns-tcp
containerPort: 5349
protocol: TCP
volumeMounts:
- name: coturn-cert
mountPath: "/etc/coturn/certs"
readOnly: true
- name: coturn-config
mountPath: /etc/coturn
- name: coturn-data
mountPath: /var/log
subPath: logs
args:
- "--tls-listening-port=5349"
- "--cert=/etc/coturn/certs/tls.crt"
- "--pkey=/etc/coturn/certs/tls.key"
volumes:
- name: coturn-config
configMap:
name: coturn-config
- name: coturn-data
persistentVolumeClaim:
claimName: coturn-pvc
- name: coturn-cert
secret:
secretName: coturn-cert
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coturn-config
namespace: matrix
data:
turnserver.conf: |
listening-port=3478
tls-listening-port=5349
total-quota=100
bps-capacity=0
log-file=/var/log/turnserver.log
no-stdout-log
verbose
min-port=49152
max-port=65535
# External IP (public or internal depending on setup)
listening-ip=0.0.0.0
relay-ip=0.0.0.0
external-ip=82.174.234.158
# Secure authentication
use-auth-secret
static-auth-secret=heleenvanderpol
realm=matrix-prod.allarddcs.nl
# Enable TLS (key names match the mounted cert-manager secret and the container args)
cert=/etc/coturn/certs/tls.crt
pkey=/etc/coturn/certs/tls.key
# WebRTC-specific settings
fingerprint
lt-cred-mech
stale-nonce
# Allow peers to communicate via relay
no-multicast-peers
no-loopback-peers
---
apiVersion: v1
kind: Service
metadata:
name: coturn
namespace: matrix
spec:
selector:
app: coturn
type: LoadBalancer
ports:
- name: coturn-udp
port: 3478
targetPort: 3478
protocol: UDP
- name: coturn-tcp
port: 3478
targetPort: 3478
protocol: TCP
- name: coturn-tls
port: 5349
targetPort: 5349
protocol: TCP
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: coturn-pvc
namespace: matrix
spec:
storageClassName: ""
volumeName: coturn-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: coturn-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/coturn
readOnly: false
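For Synapse to actually use this TURN server, homeserver.yaml needs matching settings. A hedged sketch, reusing the static-auth-secret, hostname and ports from the coturn config and checks above (lifetime and guest policy are illustrative):

# homeserver.yaml (fragment)
turn_uris:
  - "turn:coturn-prod.allarddcs.nl:3478?transport=udp"
  - "turn:coturn-prod.allarddcs.nl:3478?transport=tcp"
  - "turns:coturn-prod.allarddcs.nl:5349?transport=tcp"
turn_shared_secret: "heleenvanderpol"
turn_user_lifetime: 86400000   # illustrative: 24h in ms
turn_allow_guests: true        # illustrative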


@@ -1,14 +0,0 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: acme-challenge
namespace: cert-manager
spec:
entryPoints:
- web
routes:
- match: PathPrefix(`/.well-known/acme-challenge/`)
kind: Rule
services:
- name: cert-manager
port: 80


@@ -1,16 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: matrix-prod.allarddcs.nl-tls
namespace: matrix
spec:
dnsNames:
- matrix-prod.allarddcs.nl
issuerRef:
group: cert-manager.io
kind: ClusterIssuer
name: letsencrypt
secretName: matrix-prod.allarddcs.nl-tls
usages:
- digital signature
- key encipherment


@@ -1,116 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: matrix
namespace: matrix
labels:
app: matrix
spec:
replicas: 1
selector:
matchLabels:
app: matrix
template:
metadata:
labels:
app: matrix
spec:
containers:
- name: matrix
image: matrixdotorg/synapse:latest
# args:
# - generate
env:
- name: SYNAPSE_SERVER_NAME
value: "matrix.allarddcs.nl"
# - name: SYNAPSE_REPORT_STATS
# value: "yes"
volumeMounts:
- mountPath: /data
name: matrix
volumes:
- name: matrix
persistentVolumeClaim:
claimName: matrix-pvc
---
apiVersion: v1
kind: Service
metadata:
name: matrix
namespace: matrix
spec:
ports:
- name: http
targetPort: 8008
port: 8008
selector:
app: matrix
type: NodePort
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: matrix-http
namespace: matrix
spec:
entryPoints:
- web
routes:
- match: Host(`matrix-prod.allarddcs.nl`)
kind: Rule
services:
- name: matrix
port: 8008
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: matrix-tls
namespace: matrix
spec:
entryPoints:
- websecure
routes:
- match: Host(`matrix-prod.allarddcs.nl`)
kind: Rule
services:
- name: matrix
port: 8008
tls:
secretName: matrix-prod.allarddcs.nl-tls
# certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: matrix-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/matrix/prod
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: matrix-pvc
namespace: matrix
spec:
storageClassName: ""
volumeName: matrix-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi


@@ -1,3 +1,7 @@
Username: admin
Password: Minio01@
Access key: I8VTWUTPITYMGQ8BL22V
Secret key: Snd6kC8KL73E7FyHaf3vMmgD8iDSHlGdEcrAHIoJ
Note: Copy the credentials to a secure location. MinIO will not display them again.


@@ -1,3 +1,12 @@
#login:
user: admin
password: NextcloudNextcloud01
user: allard
password: Nextcloud01@
INSIDE THE NEXTCLOUD CONTAINER:
===============================