This commit is contained in:
Ubuntu
2025-11-27 22:01:44 +01:00
parent 0c80e8f2d3
commit 9d24a4b906
5 changed files with 1632 additions and 2 deletions


@@ -0,0 +1,283 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: defectdojo
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
app: defectdojo
template:
metadata:
labels:
app: defectdojo
spec:
containers:
- name: defectdojo
image: defectdojo/defectdojo:2.31.0
env:
- name: DD_DATABASE_URL
valueFrom:
secretKeyRef:
name: defectdojo-secret
key: DD_DATABASE_URL
- name: DD_ADMIN_USER
valueFrom:
secretKeyRef:
name: defectdojo-secret
key: DD_ADMIN_USER
- name: DD_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: defectdojo-secret
key: DD_ADMIN_PASSWORD
- name: DD_REDIS_HOST
value: redis
- name: DD_REDIS_PORT
value: "6379"
volumeMounts:
- name: web-storage
mountPath: /app/media
volumes:
- name: web-storage
persistentVolumeClaim:
claimName: pvc-defectdojo-nginx
---
apiVersion: v1
kind: Service
metadata:
name: defectdojo
namespace: defectdojo
spec:
ports:
- port: 8080
targetPort: 8080
selector:
app: defectdojo
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: celery-worker
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
app: celery-worker
template:
metadata:
labels:
app: celery-worker
spec:
containers:
- name: celery-worker
image: defectdojo/worker:2.31.0
env:
- name: DD_DATABASE_URL
valueFrom:
secretKeyRef:
name: defectdojo-secret
key: DD_DATABASE_URL
- name: DD_REDIS_HOST
value: redis
volumeMounts:
- name: worker-storage
mountPath: /app/media
volumes:
- name: worker-storage
persistentVolumeClaim:
claimName: pvc-defectdojo-celeryworker
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: celery-beat
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
app: celery-beat
template:
metadata:
labels:
app: celery-beat
spec:
containers:
- name: celery-beat
image: defectdojo/beat:2.31.0
env:
- name: DD_DATABASE_URL
valueFrom:
secretKeyRef:
name: defectdojo-secret
key: DD_DATABASE_URL
- name: DD_REDIS_HOST
value: redis
volumeMounts:
- name: beat-storage
mountPath: /app/media
volumes:
- name: beat-storage
persistentVolumeClaim:
claimName: pvc-defectdojo-celerybeat
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: redis:7
volumeMounts:
- mountPath: /data
name: redis-storage
command: ["redis-server", "--appendonly", "yes"]
volumes:
- name: redis-storage
persistentVolumeClaim:
claimName: pvc-defectdojo-redis
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: defectdojo
spec:
ports:
- port: 6379
selector:
app: redis
---
apiVersion: v1
kind: Secret
metadata:
name: defectdojo-secret
namespace: defectdojo
type: Opaque
stringData:
DD_DATABASE_URL: "postgres://defectdojo:defectdojo@postgres15.postgres.svc.cluster.local:5432/defectdojo"
DD_ADMIN_USER: "admin"
DD_ADMIN_PASSWORD: "Defectdojo01@"
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-defectdojo-nginx
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/nginx
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-defectdojo-redis
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/redis
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-defectdojo-celerybeat
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/celerybeat
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-defectdojo-celeryworker
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/celeryworker
persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-defectdojo-nginx
namespace: defectdojo
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
volumeName: pv-defectdojo-nginx
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-defectdojo-redis
namespace: defectdojo
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
volumeName: pv-defectdojo-redis
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-defectdojo-celerybeat
namespace: defectdojo
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
volumeName: pv-defectdojo-celerybeat
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-defectdojo-celeryworker
namespace: defectdojo
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
volumeName: pv-defectdojo-celeryworker
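# How the static binding above works: each PVC pins a specific NFS-backed PV via spec.volumeName,
# so dynamic provisioning is skipped and each claim binds one-to-one. A quick check after apply (sketch):
#   kubectl -n defectdojo get pvc
#   kubectl get pv | grep defectdojo
# Every PVC should report STATUS "Bound" against its matching pv-defectdojo-* volume.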


@@ -0,0 +1,520 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: nginx
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: nginx
spec:
containers:
- env:
- name: NGINX_METRICS_ENABLED
value: "false"
- name: DD_UWSGI_HOST
value: "uwsgi.defectdojo"
- name: HTTP_AUTH_PASSWORD
value: "Defectdojo01@"
image: defectdojo/defectdojo-nginx
imagePullPolicy: IfNotPresent
name: nginx
ports:
- containerPort: 8080
- containerPort: 8443
resources: {}
volumeMounts:
- mountPath: /usr/share/nginx/html/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: defectdojo
spec:
ports:
- name: "8080"
port: 8080
targetPort: 8080
- name: "8443"
port: 8443
targetPort: 8443
selector:
io.kompose.service: nginx
status:
loadBalancer: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-media-pvc
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-media-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-media-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/media
readOnly: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: uwsgi
name: uwsgi
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: uwsgi
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: uwsgi
spec:
containers:
- command:
- /wait-for-it.sh
- postgres15.postgres:5432
- -t
- "30"
- --
- /entrypoint-uwsgi.sh
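# Equivalent shell form of the command list above (for readability):
#   /wait-for-it.sh postgres15.postgres:5432 -t 30 -- /entrypoint-uwsgi.sh
# i.e. block for up to 30 seconds until the database answers, then exec the uwsgi entrypoint.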
env:
- name: DD_ALLOWED_HOSTS
value: '*'
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres15.postgres:5432/defectdojo
- name: DD_DEBUG
value: "False"
- name: DD_DJANGO_METRICS_ENABLED
value: "False"
- name: DD_ASYNC_FINDING_IMPORT
value: "False"
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
- name: DD_ENABLE_AUDITLOG
value: "False"
image: defectdojo/defectdojo-django
imagePullPolicy: IfNotPresent
name: uwsgi
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: defectdojo-uwsgi
- mountPath: /app/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: defectdojo-uwsgi
persistentVolumeClaim:
claimName: defectdojo-uwsgi-pvc
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: Service
metadata:
name: uwsgi
namespace: defectdojo
spec:
ports:
- name: "3031"
port: 3031
targetPort: 3031
selector:
io.kompose.service: uwsgi
status:
loadBalancer: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-uwsgi-pvc
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-uwsgi-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-uwsgi-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/uwsgi
readOnly: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: celeryworker
name: celeryworker
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: celeryworker
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: celeryworker
spec:
containers:
- command:
- /wait-for-it.sh
- postgres15.postgres:5432
- -t
- "30"
- --
- /entrypoint-celery-worker.sh
env:
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres15.postgres:5432/defectdojo
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
- name: DD_ENABLE_AUDITLOG
value: "False"
image: defectdojo/defectdojo-django
imagePullPolicy: IfNotPresent
name: celeryworker
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: defectdojo-celeryworker
- mountPath: /app/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: defectdojo-celeryworker
persistentVolumeClaim:
claimName: defectdojo-celeryworker-pvc
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-celeryworker-pvc
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-celeryworker-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-celeryworker-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/celeryworker
readOnly: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: celerybeat
name: celerybeat
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: celerybeat
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: celerybeat
spec:
containers:
- command:
- /wait-for-it.sh
- postgres15.postgres:5432
- -t
- "30"
- --
- /entrypoint-celery-beat.sh
env:
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres15.postgres:5432/defectdojo
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
- name: DD_ENABLE_AUDITLOG
value: "False"
image: defectdojo/defectdojo-django
imagePullPolicy: IfNotPresent
name: celerybeat
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: defectdojo-celerybeat
restartPolicy: Always
volumes:
- name: defectdojo-celerybeat
persistentVolumeClaim:
claimName: defectdojo-celerybeat-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-celerybeat-pvc
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-celerybeat-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-celerybeat-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/celerybeat
readOnly: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: redis
name: redis
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: redis
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: redis
spec:
containers:
- image: redis
name: redis
resources: {}
volumeMounts:
- mountPath: /data
name: defectdojo-redis
restartPolicy: Always
volumes:
- name: defectdojo-redis
persistentVolumeClaim:
claimName: defectdojo-redis-pvc
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: defectdojo
spec:
ports:
- name: "6379"
port: 6379
targetPort: 6379
selector:
io.kompose.service: redis
status:
loadBalancer: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-redis-pvc
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-redis-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-redis-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/redis
readOnly: false
---
apiVersion: batch/v1
kind: Job
metadata:
name: initializer
namespace: defectdojo
spec:
backoffLimit: 3
template:
spec:
restartPolicy: OnFailure
containers:
- name: initializer
image: defectdojo/defectdojo-django
imagePullPolicy: IfNotPresent
command:
- /wait-for-it.sh
- postgres15.postgres:5432
- --
- /entrypoint-initializer.sh
env:
- name: DD_ADMIN_FIRST_NAME
value: admin
- name: DD_ADMIN_LAST_NAME
value: admin
- name: DD_ADMIN_MAIL
value: admin@allarddcs.nl
- name: DD_ADMIN_USER
value: admin
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres15.postgres:5432/defectdojo
- name: DD_INITIALIZE
value: "true"
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
volumeMounts:
- mountPath: /app/docker/extra_settings
name: initializer-claim0
volumes:
- name: initializer-claim0
persistentVolumeClaim:
claimName: initializer-claim0
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
io.kompose.service: initializer-claim0
name: initializer-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
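# The Job above runs the DefectDojo initializer (migrations and fixtures) once. Checking it after apply (sketch):
#   kubectl -n defectdojo get jobs
#   kubectl -n defectdojo logs job/initializer -f
# With backoffLimit: 3 the Job is retried at most three times before being marked Failed.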


@@ -126,7 +126,7 @@ spec:
 - name: DD_CREDENTIAL_AES_256_KEY
 value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
 - name: DD_DATABASE_URL
-value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
+value: postgresql://defectdojo:defectdojo@postgres15.postgres:5432/defectdojo
 - name: DD_DEBUG
 value: "False"
 - name: DD_DJANGO_METRICS_ENABLED
@@ -399,7 +399,7 @@ spec:
 io.kompose.service: redis
 spec:
 containers:
-- image: redis:7.2.4-alpine@sha256:a40e29800d387e3cf9431902e1e7a362e4d819233d68ae39380532c3310091ac
+- image: redis
 name: redis
 resources: {}
 volumeMounts:
@@ -459,3 +459,5 @@
 server: 192.168.2.110
 path: /mnt/nfs_share/defectdojo/redis
 readOnly: false


@@ -0,0 +1,825 @@
expose:
# Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
# and fill the information in the corresponding section
type: clusterIP
tls:
# Enable TLS or not.
# Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
# Note: if the "expose.type" is "ingress" and TLS is disabled,
# the port must be included in the command when pulling/pushing images.
# Refer to https://github.com/goharbor/harbor/issues/5291 for details.
enabled: true
# The source of the tls certificate. Set as "auto", "secret"
# or "none" and fill the information in the corresponding section
# 1) auto: generate the tls certificate automatically
# 2) secret: read the tls certificate from the specified secret.
# The tls certificate can be generated manually or by cert manager
# 3) none: configure no tls certificate for the ingress. If the default
# tls certificate is configured in the ingress controller, choose this option
certSource: auto
auto:
# The common name used to generate the certificate, it's necessary
# when the type isn't "ingress"
commonName: "harbor-dev.allarddcs.nl"
secret:
# The name of secret which contains keys named:
# "tls.crt" - the certificate
# "tls.key" - the private key
secretName: "harbor-dev.allarddcs.nl-tls"
clusterIP:
# The name of ClusterIP service
name: harbor
# The ip address of the ClusterIP service (leave empty for acquiring dynamic ip)
staticClusterIP: ""
ports:
# The service port Harbor listens on when serving HTTP
httpPort: 80
# The service port Harbor listens on when serving HTTPS
httpsPort: 443
# Annotations on the ClusterIP service
annotations: {}
# ClusterIP-specific labels
labels: {}
# The external URL for Harbor core service. It is used to
# 1) populate the docker/helm commands showed on portal
# 2) populate the token service URL returned to docker client
#
# Format: protocol://domain[:port]. Usually:
# 1) if "expose.type" is "ingress", the "domain" should be
# the value of "expose.ingress.hosts.core"
# 2) if "expose.type" is "clusterIP", the "domain" should be
# the value of "expose.clusterIP.name"
# 3) if "expose.type" is "nodePort", the "domain" should be
# the IP address of k8s node
#
# If Harbor is deployed behind the proxy, set it as the URL of proxy
externalURL: https://harbor-dev.allarddcs.nl
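# With the URL above, clients reach the registry as (assuming DNS resolves the name to the service):
#   docker login harbor-dev.allarddcs.nl
#   docker push harbor-dev.allarddcs.nl/<project>/<image>:<tag>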
# The persistence is enabled by default and a default StorageClass
# is needed in the k8s cluster to provision volumes dynamically.
# Specify another StorageClass in the "storageClass" or set "existingClaim"
# if you already have existing persistent volumes to use
#
# For storing images and charts, you can also use "azure", "gcs", "s3",
# "swift" or "oss". Set it in the "imageChartStorage" section
persistence:
enabled: true
# Setting it to "keep" to avoid removing PVCs during a helm delete
# operation. Leaving it empty will delete PVCs after the chart deleted
# (this does not apply for PVCs that are created for internal database
# and redis components, i.e. they are never deleted automatically)
resourcePolicy: "keep"
persistentVolumeClaim:
registry:
# Use the existing PVC which must be created manually before bound,
# and specify the "subPath" if the PVC is shared with other components
existingClaim: "harbor-pvc"
# Specify the "storageClass" used to provision the volume. Or the default
# StorageClass will be used (the default).
# Set it to "-" to disable dynamic provisioning
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 5Gi
annotations: {}
jobservice:
jobLog:
existingClaim: "harbor-jobservice-pvc"
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
annotations: {}
# If external database is used, the following settings for database will
# be ignored
database:
existingClaim: ""
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
annotations: {}
# If external Redis is used, the following settings for Redis will
# be ignored
redis:
existingClaim: "harbor-redis-pvc"
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
annotations: {}
trivy:
existingClaim: "harbor-trivy-pvc"
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 5Gi
annotations: {}
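# All existingClaim values above (harbor-pvc, harbor-jobservice-pvc, harbor-redis-pvc,
# harbor-trivy-pvc) must exist in the release namespace before installing. A minimal sketch
# for one of them, assuming the namespace is "harbor" and a default StorageClass exists:
#   kubectl -n harbor create -f - <<'EOF'
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: harbor-pvc
#   spec:
#     accessModes: ["ReadWriteOnce"]
#     resources:
#       requests:
#         storage: 5Gi
#   EOF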
# The initial password of the Harbor admin. Change it from the portal after launching Harbor,
# or provide an existing secret for it.
# The key in the secret is given via existingSecretAdminPasswordKey (defaults to HARBOR_ADMIN_PASSWORD)
existingSecretAdminPassword: ""
existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD
harborAdminPassword: "Harbor01@"
# Internal TLS is used for secure communication between Harbor components. To enable https
# in each component, TLS cert files need to be provided in advance.
ipFamily:
# Set ipv6.enabled to true if IPv6 is enabled in the cluster; currently this only affects the nginx-related components
ipv6:
enabled: true
# Set ipv4.enabled to true if IPv4 is enabled in the cluster; currently this only affects the nginx-related components
ipv4:
enabled: true
# Sets the IP family policy for services to be able to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services).
policy: ""
# A list of IP families for services that should be supported, in the order in which they should be applied to ClusterIP. Can be "IPv4" and/or "IPv6".
families: []
imagePullPolicy: IfNotPresent
# Use this to assign a list of default pullSecrets
imagePullSecrets:
# - name: docker-registry-secret
# - name: internal-registry-secret
# The update strategy for deployments with persistent volumes (jobservice, registry): "RollingUpdate" or "Recreate"
# Set it to "Recreate" when RWM (ReadWriteMany) for volumes isn't supported
updateStrategy:
type: RollingUpdate
# debug, info, warning, error or fatal
logLevel: info
metrics:
enabled: false
core:
path: /metrics
port: 8001
registry:
path: /metrics
port: 8001
jobservice:
path: /metrics
port: 8001
exporter:
path: /metrics
port: 8001
## Create prometheus serviceMonitor to scrape harbor metrics.
## This requires the monitoring.coreos.com/v1 CRD. Please see
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md
##
serviceMonitor:
enabled: false
additionalLabels: {}
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval: ""
# Metric relabel configs to apply to samples before ingestion.
metricRelabelings:
[]
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# Relabel configs to apply to samples before ingestion.
relabelings:
[]
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
trace:
enabled: false
# trace provider: jaeger or otel
# jaeger should be 1.26+
provider: jaeger
# set sample_rate to 1 to sample 100% of trace data; set it to 0.5 to sample 50%, and so forth
sample_rate: 1
# namespace used to differentiate different harbor services
# namespace:
# attributes is a key value dict contains user defined attributes used to initialize trace provider
# attributes:
# application: harbor
jaeger:
# jaeger supports two modes:
# collector mode(uncomment endpoint and uncomment username, password if needed)
# agent mode(uncomment agent_host and agent_port)
endpoint: http://hostname:14268/api/traces
# username:
# password:
# agent_host: hostname
# export trace data by jaeger.thrift in compact mode
# agent_port: 6831
otel:
endpoint: hostname:4318
url_path: /v1/traces
compression: false
insecure: true
# timeout is in seconds
timeout: 10
# If service exposed via "ingress", the Nginx will not be used
nginx:
image:
repository: goharbor/nginx-photon
tag: v2.14.0
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## The priority class to run the pod as
priorityClassName:
portal:
image:
repository: goharbor/harbor-portal
tag: v2.14.0
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## Additional service annotations
serviceAnnotations: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
core:
image:
repository: goharbor/harbor-core
tag: v2.14.0
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
## Startup probe values
startupProbe:
enabled: true
initialDelaySeconds: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## Additional service annotations
serviceAnnotations: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
## User settings configuration json string
configureUserSettings:
# The provider for updating project quota (usage); there are two options: redis or db.
# By default it is implemented by db, but you can configure it to use redis, which
# can improve performance under highly concurrent pushes to the same project
# and reduce database connection spikes and usage.
# Using redis adds some delay before quota usage is updated for display, so only
# switch the provider to redis if you run into database connection spikes under
# highly concurrent pushes to the same project; there is no improvement for other scenarios.
quotaUpdateProvider: db # Or redis
# Secret is used when core server communicates with other components.
# If a secret key is not specified, Helm will generate one. Alternatively set existingSecret to use an existing secret
# Must be a string of 16 chars.
secret: ""
# Fill in the name of a kubernetes secret if you want to use your own
# If using existingSecret, the key must be secret
existingSecret: ""
# Fill the name of a kubernetes secret if you want to use your own
# TLS certificate and private key for token encryption/decryption.
# The secret must contain keys named:
# "tls.key" - the private key
# "tls.crt" - the certificate
secretName: ""
# If not specifying a preexisting secret, a secret can be created from tokenKey and tokenCert and used instead.
# If none of secretName, tokenKey, and tokenCert are specified, an ephemeral key and certificate will be autogenerated.
# tokenKey and tokenCert must BOTH be set or BOTH unset.
# The tokenKey value is formatted as a multiline string containing a PEM-encoded RSA key, indented one more than tokenKey on the following line.
tokenKey: |
# If tokenKey is set, the value of tokenCert must be set as a PEM-encoded certificate signed by tokenKey, and supplied as a multiline string, indented one more than tokenCert on the following line.
tokenCert: |
# The XSRF key. Will be generated automatically if it isn't specified
# If you do specify it, make sure it is exactly 32 characters; otherwise validation will fail at harbor-core runtime
# https://github.com/goharbor/harbor/pull/21154
xsrfKey: ""
# If using existingSecret, the key is defined by core.existingXsrfSecretKey
existingXsrfSecret: ""
# If using existingSecret, the key
existingXsrfSecretKey: CSRF_KEY
# The duration for asynchronously updating artifact pull_time and repository
# pull_count, in seconds. Defaults to 10 seconds if it isn't set.
# eg. artifactPullAsyncFlushDuration: 10
artifactPullAsyncFlushDuration:
gdpr:
deleteUser: false
auditLogsCompliant: false
jobservice:
image:
repository: goharbor/harbor-jobservice
tag: v2.14.0
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
maxJobWorkers: 10
# The logger for jobs: "file", "database" or "stdout"
jobLoggers:
- file
# - database
# - stdout
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
loggerSweeperDuration: 14 #days
notification:
webhook_job_max_retry: 3
webhook_job_http_client_timeout: 3 # in seconds
reaper:
# the max time to wait for a task to finish; if still unfinished after max_update_hours, the task will be marked as error, but it will continue to run; default value is 24
max_update_hours: 24
# the max time for execution in running state without new task created
max_dangling_hours: 168
# Secret is used when job service communicates with other components.
# If a secret key is not specified, Helm will generate one.
# Must be a string of 16 chars.
secret: ""
# Use an existing secret resource
existingSecret: ""
# Key within the existing secret for the job service secret
existingSecretKey: JOBSERVICE_SECRET
registry:
registry:
image:
repository: goharbor/registry-photon
tag: v2.14.0
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
controller:
image:
repository: goharbor/harbor-registryctl
tag: v2.14.0
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
# Secret is used to secure the upload state from client
# and registry storage backend.
# See: https://github.com/distribution/distribution/blob/release/2.8/docs/configuration.md#http
# If a secret key is not specified, Helm will generate one.
# Must be a string of 16 chars.
secret: ""
# Use an existing secret resource
existingSecret: ""
# Key within the existing secret for the registry service secret
existingSecretKey: REGISTRY_HTTP_SECRET
# If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
relativeurls: false
credentials:
username: "harbor_registry_user"
password: "harbor_registry_password"
# If using existingSecret, the key must be REGISTRY_PASSWD and REGISTRY_HTPASSWD
existingSecret: ""
# Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like ArgoCD or Flux. This allows the same line to be generated on every template render, instead of the `htpasswd` function from Helm, which generates a different line each time because of the salt.
# htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string
htpasswdString: ""
middleware:
enabled: false
type: cloudFront
cloudFront:
baseurl: example.cloudfront.net
keypairid: KEYPAIRID
duration: 3000s
ipfilteredby: none
# The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
# that allows access to CloudFront
privateKeySecret: "my-secret"
# enable purge _upload directories
upload_purging:
enabled: true
# remove files in _upload directories which exist for a period of time, default is one week.
age: 168h
# the interval of the purge operations
interval: 24h
dryrun: false
trivy:
# enabled the flag to enable Trivy scanner
enabled: true
image:
# repository the repository for Trivy adapter image
repository: goharbor/trivy-adapter-photon
# tag the tag for Trivy adapter image
tag: v2.14.0
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
# replicas the number of Pod replicas
replicas: 1
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 1
memory: 1Gi
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
# debugMode the flag to enable Trivy debug mode with more verbose scanning log
debugMode: false
# vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
vulnType: "os,library"
# severity a comma-separated list of severities to be checked
severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
# ignoreUnfixed the flag to display only fixed vulnerabilities
ignoreUnfixed: false
# insecure the flag to skip verifying registry certificate
insecure: false
# gitHubToken the GitHub access token to download Trivy DB
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
# timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
# Currently, the database is updated every 12 hours and published as a new release to GitHub.
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://developer.github.com/v3/#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
gitHubToken: ""
# skipUpdate the flag to disable Trivy DB downloads from GitHub
#
# You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
# `/home/scanner/.cache/trivy/db/trivy.db` path.
skipUpdate: false
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
skipJavaDBUpdate: false
# The dbRepository and javaDBRepository flags can take multiple values, improving reliability when downloading databases.
# Databases are downloaded in priority order until one is successful.
# An attempt to download from the next repository is only made if a temporary error is received (e.g. status 429 or 5xx).
#
# OCI repository(ies) to retrieve the trivy vulnerability database in order of priority
dbRepository:
- "mirror.gcr.io/aquasec/trivy-db"
- "ghcr.io/aquasecurity/trivy-db"
# OCI repository(ies) to retrieve the Java trivy vulnerability database in order of priority
javaDBRepository:
- "mirror.gcr.io/aquasec/trivy-java-db"
- "ghcr.io/aquasecurity/trivy-java-db"
# The offlineScan option prevents Trivy from sending API requests to identify dependencies.
#
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
# exist in the local repositories. This means fewer vulnerabilities may be detected in offline mode.
# It works as expected only if all the dependencies are available locally.
# This option doesn't affect the DB download. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
offlineScan: false
# Comma-separated list of what security issues to detect. Defaults to `vuln`.
securityCheck: "vuln"
# The duration to wait for scan completion
timeout: 5m0s
database:
# if external database is used, set "type" to "external"
# and fill the connection information in "external" section
type: external
internal:
image:
repository: goharbor/harbor-db
tag: v2.14.0
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
# The timeout used in livenessProbe; 1 to 5 seconds
livenessProbe:
timeoutSeconds: 1
# The timeout used in readinessProbe; 1 to 5 seconds
readinessProbe:
timeoutSeconds: 1
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
extrInitContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
# The initial superuser password for internal database
password: "harbor"
# The size limit for shared memory; PostgreSQL uses it for shared_buffers
# More details see:
# https://github.com/goharbor/harbor/issues/15034
shmSizeLimit: 512Mi
initContainer:
migrator: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
permissions: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
external:
host: "postgres14.postgres.svc.cluster.local"
port: "5432"
username: "harbor"
password: "harbor"
coreDatabase: "harbor"
# if using existing secret, the key must be "password"
existingSecret: ""
# "disable" - No SSL
# "require" - Always SSL (skip verification)
# "verify-ca" - Always SSL (verify that the certificate presented by the
# server was signed by a trusted CA)
# "verify-full" - Always SSL (verify that the certification presented by the
# server was signed by a trusted CA and the server host name matches the one
# in the certificate)
sslmode: "disable"
# The maximum number of connections in the idle connection pool per pod (core+exporter).
# If it <=0, no idle connections are retained.
maxIdleConns: 100
# The maximum number of open connections to the database per pod (core+exporter).
# If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 1024 for harbor's postgres.
maxOpenConns: 900
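# A quick connectivity check against the external database above before installing
# (a sketch; the pod name and image tag are illustrative):
#   kubectl run harbor-db-check --rm -it --image=postgres:14 -- \
#     psql "postgresql://harbor:harbor@postgres14.postgres.svc.cluster.local:5432/harbor" -c '\conninfo'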
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
redis:
# if external Redis is used, set "type" to "external"
# and fill the connection information in "external" section
type: internal
internal:
image:
repository: goharbor/redis-photon
tag: v2.14.0
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
nodeSelector: {}
tolerations: []
affinity: {}
## The priority class to run the pod as
priorityClassName:
# containers to be run before the controller's container starts.
initContainers: []
# Example:
#
# - name: wait
# image: busybox
# command: [ 'sh', '-c', "sleep 20" ]
# # jobserviceDatabaseIndex defaults to "1"
# # registryDatabaseIndex defaults to "2"
# # trivyAdapterIndex defaults to "5"
# # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
# # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
jobserviceDatabaseIndex: "1"
registryDatabaseIndex: "2"
trivyAdapterIndex: "5"
# harborDatabaseIndex: "6"
# cacheLayerDatabaseIndex: "7"
external:
# support redis, redis+sentinel
# addr for redis: <host_redis>:<port_redis>
# addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
addr: "192.168.0.2:6379"
# The name of the set of Redis instances to monitor; it must be set to support redis+sentinel
sentinelMasterSet: ""
# TLS configuration for the redis connection
# Only server authentication is supported; mTLS for the redis connection is not supported
# The TLS connection is disabled by default
# Once `tlsOptions.enable` is set to true, a TLS/SSL connection will be used for redis
# Please set `caBundleSecretName` in this configuration file if the redis server certificate is self-signed; the referenced secret contains the redis server rootCA.
# The secret must contain a key named "ca.crt", which will be injected into the trust store
tlsOptions:
enable: false
# The "coreDatabaseIndex" must be "0" as the library Harbor
# used doesn't support configuring it
# harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
# cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
coreDatabaseIndex: "0"
jobserviceDatabaseIndex: "1"
registryDatabaseIndex: "2"
trivyAdapterIndex: "5"
# harborDatabaseIndex: "6"
# cacheLayerDatabaseIndex: "7"
# The username field can be an empty string; authentication is then performed against the default user
username: ""
password: ""
# If using existingSecret, the key must be REDIS_PASSWORD; if ACL mode is enabled, the secret must also include the username under the key REDIS_USERNAME
existingSecret: ""
## Additional deployment annotations
podAnnotations: {}
## Additional deployment labels
podLabels: {}
exporter:
image:
repository: goharbor/harbor-exporter
tag: v2.14.0
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
revisionHistoryLimit: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
extraEnvVars: []
podAnnotations: {}
## Additional deployment labels
podLabels: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Spread Pods across failure-domains like regions, availability zones or nodes
topologySpreadConstraints: []
## The priority class to run the pod as
priorityClassName:
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# nodeTaintsPolicy: Honor
# whenUnsatisfiable: DoNotSchedule
cacheDuration: 23
cacheCleanInterval: 14400
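# Deployment sketch for these values (assumptions: this file is saved as values.yaml, the release
# namespace is "harbor", and the chart version matching app v2.14.0 is used, e.g. the 1.18.x line):
#   helm repo add harbor https://helm.goharbor.io
#   helm repo update
#   helm upgrade --install harbor harbor/harbor -n harbor --create-namespace -f values.yaml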