expose:
  # Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
  # and fill the information in the corresponding section
  type: clusterIP
  tls:
    # Enable TLS or not.
    # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
    # Note: if the "expose.type" is "ingress" and TLS is disabled,
    # the port must be included in the command when pulling/pushing images.
    # Refer to https://github.com/goharbor/harbor/issues/5291 for details.
    enabled: true
    # The source of the tls certificate. Set as "auto", "secret"
    # or "none" and fill the information in the corresponding section
    # 1) auto: generate the tls certificate automatically
    # 2) secret: read the tls certificate from the specified secret.
    # The tls certificate can be generated manually or by cert manager
    # 3) none: configure no tls certificate for the ingress. If the default
    # tls certificate is configured in the ingress controller, choose this option
    certSource: auto
    auto:
      # The common name used to generate the certificate, it's necessary
      # when the type isn't "ingress"
      commonName: "harbor-dev.allarddcs.nl"
    secret:
      # The name of secret which contains keys named:
      # "tls.crt" - the certificate
      # "tls.key" - the private key
      secretName: "harbor-dev.allarddcs.nl-tls"
  clusterIP:
    # The name of ClusterIP service
    name: harbor
    # The ip address of the ClusterIP service (leave empty for acquiring dynamic ip)
    staticClusterIP: ""
    ports:
      # The service port Harbor listens on when serving HTTP
      httpPort: 80
      # The service port Harbor listens on when serving HTTPS
      httpsPort: 443
    # Annotations on the ClusterIP service
    annotations: {}
    # ClusterIP-specific labels
    labels: {}

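# A hypothetical sketch of the alternative "ingress" exposure (not used here, since
# expose.type is "clusterIP"); the host and annotation values below are examples only:
# expose:
#   type: ingress
#   ingress:
#     hosts:
#       core: harbor.example.com
#     annotations:
#       nginx.ingress.kubernetes.io/ssl-redirect: "true"
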
# The external URL for Harbor core service. It is used to
# 1) populate the docker/helm commands shown on the portal
# 2) populate the token service URL returned to docker client
#
# Format: protocol://domain[:port]. Usually:
# 1) if "expose.type" is "ingress", the "domain" should be
# the value of "expose.ingress.hosts.core"
# 2) if "expose.type" is "clusterIP", the "domain" should be
# the value of "expose.clusterIP.name"
# 3) if "expose.type" is "nodePort", the "domain" should be
# the IP address of k8s node
#
# If Harbor is deployed behind a proxy, set it as the URL of the proxy
externalURL: https://harbor-dev.allarddcs.nl
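# For example, with expose.type "nodePort" the value above would typically take the
# form externalURL: https://<node-ip>:<https-nodePort> (both values are placeholders).
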
# The persistence is enabled by default and a default StorageClass
# is needed in the k8s cluster to provision volumes dynamically.
# Specify another StorageClass in the "storageClass" or set "existingClaim"
# if you already have existing persistent volumes to use
#
# For storing images and charts, you can also use "azure", "gcs", "s3",
# "swift" or "oss". Set it in the "imageChartStorage" section
persistence:
  enabled: true
  # Setting it to "keep" avoids removing PVCs during a helm delete
  # operation. Leaving it empty will delete PVCs after the chart is deleted
  # (this does not apply for PVCs that are created for internal database
  # and redis components, i.e. they are never deleted automatically)
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      # Use the existing PVC which must be created manually before being bound,
      # and specify the "subPath" if the PVC is shared with other components
      existingClaim: "harbor-pvc"
      # Specify the "storageClass" used to provision the volume. Or the default
      # StorageClass will be used (the default).
      # Set it to "-" to disable dynamic provisioning
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
      annotations: {}
    jobservice:
      jobLog:
        existingClaim: "harbor-jobservice-pvc"
        storageClass: ""
        subPath: ""
        accessMode: ReadWriteOnce
        size: 1Gi
        annotations: {}
    # If external database is used, the following settings for database will
    # be ignored
    database:
      existingClaim: ""
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
      annotations: {}
    # If external Redis is used, the following settings for Redis will
    # be ignored
    redis:
      existingClaim: "harbor-redis-pvc"
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
      annotations: {}
    trivy:
      existingClaim: "harbor-trivy-pvc"
      storageClass: ""
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
      annotations: {}

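# A hypothetical sketch of sharing one pre-created PVC between components via
# "subPath" under persistence.persistentVolumeClaim (claim name and sub-paths are
# placeholders):
#   registry:
#     existingClaim: "harbor-shared-pvc"
#     subPath: "registry"
#   jobservice:
#     jobLog:
#       existingClaim: "harbor-shared-pvc"
#       subPath: "job-logs"
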
# The initial password of Harbor admin. Change it from the portal after launching Harbor,
# or give an existing secret for it.
# The key in the secret is given via existingSecretAdminPasswordKey (defaults to HARBOR_ADMIN_PASSWORD)
existingSecretAdminPassword: ""
existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD
harborAdminPassword: "Harbor01@"
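# A minimal sketch of a pre-created Secret that could be referenced via
# existingSecretAdminPassword (name and value below are placeholders):
# apiVersion: v1
# kind: Secret
# metadata:
#   name: harbor-admin-password
# stringData:
#   HARBOR_ADMIN_PASSWORD: "ChangeMe123"
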
# The internal TLS used for secure communication between harbor components. In order to enable https
# in each component, tls cert files need to be provided in advance.
ipFamily:
  # ipv6Enabled set to true if ipv6 is enabled in the cluster; currently it affects the nginx related components
  ipv6:
    enabled: true
  # ipv4Enabled set to true if ipv4 is enabled in the cluster; currently it affects the nginx related components
  ipv4:
    enabled: true

  # Sets the IP family policy for services to be able to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services).
  policy: ""
  # A list of IP families for services that should be supported, in the order in which they should be applied to ClusterIP. Can be "IPv4" and/or "IPv6".
  families: []
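  # For example, a dual-stack setup could use the Kubernetes ipFamilyPolicy/ipFamilies
  # conventions (shown here as a hypothetical sketch):
  #   policy: "PreferDualStack"
  #   families: ["IPv4", "IPv6"]
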
imagePullPolicy: IfNotPresent

# Use this set to assign a list of default pullSecrets
imagePullSecrets:
#  - name: docker-registry-secret
#  - name: internal-registry-secret

# The update strategy for deployments with persistent volumes (jobservice, registry): "RollingUpdate" or "Recreate"
# Set it as "Recreate" when "RWM" for volumes isn't supported
updateStrategy:
  type: RollingUpdate

# debug, info, warning, error or fatal
logLevel: info

metrics:
  enabled: false
  core:
    path: /metrics
    port: 8001
  registry:
    path: /metrics
    port: 8001
  jobservice:
    path: /metrics
    port: 8001
  exporter:
    path: /metrics
    port: 8001
  ## Create prometheus serviceMonitor to scrape harbor metrics.
  ## This requires the monitoring.coreos.com/v1 CRD. Please see
  ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md
  ##
  serviceMonitor:
    enabled: false
    additionalLabels: {}
    # Scrape interval. If not set, the Prometheus default scrape interval is used.
    interval: ""
    # Metric relabel configs to apply to samples before ingestion.
    metricRelabelings:
      []
      # - action: keep
      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
      #   sourceLabels: [__name__]
    # Relabel configs to apply to samples before ingestion.
    relabelings:
      []
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace

trace:
  enabled: false
  # trace provider: jaeger or otel
  # jaeger should be 1.26+
  provider: jaeger
  # set sample_rate to 1 if you want to sample 100% of trace data; set 0.5 to sample 50%, and so forth
  sample_rate: 1
  # namespace used to differentiate different harbor services
  # namespace:
  # attributes is a key value dict containing user defined attributes used to initialize the trace provider
  # attributes:
  #   application: harbor
  jaeger:
    # jaeger supports two modes:
    # collector mode (uncomment endpoint and uncomment username, password if needed)
    # agent mode (uncomment agent_host and agent_port)
    endpoint: http://hostname:14268/api/traces
    # username:
    # password:
    # agent_host: hostname
    # export trace data by jaeger.thrift in compact mode
    # agent_port: 6831
  otel:
    endpoint: hostname:4318
    url_path: /v1/traces
    compression: false
    insecure: true
    # timeout is in seconds
    timeout: 10

# If the service is exposed via "ingress", Nginx will not be used
nginx:
  image:
    repository: goharbor/nginx-photon
    tag: v2.14.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:

portal:
  image:
    repository: goharbor/harbor-portal
    tag: v2.14.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## Additional service annotations
  serviceAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:
  # containers to be run before the controller's container starts.
  initContainers: []
  # Example:
  #
  # - name: wait
  #   image: busybox
  #   command: [ 'sh', '-c', "sleep 20" ]

core:
  image:
    repository: goharbor/harbor-core
    tag: v2.14.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  ## Startup probe values
  startupProbe:
    enabled: true
    initialDelaySeconds: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## Additional service annotations
  serviceAnnotations: {}
  ## The priority class to run the pod as
  priorityClassName:
  # containers to be run before the controller's container starts.
  initContainers: []
  # Example:
  #
  # - name: wait
  #   image: busybox
  #   command: [ 'sh', '-c', "sleep 20" ]
  ## User settings configuration json string
  configureUserSettings:
  # The provider for updating project quota (usage); there are 2 options, redis or db.
  # By default it is implemented by db, but you can configure it to redis, which
  # can improve the performance of highly concurrent pushes to the same project
  # and reduce database connection spikes and usage.
  # Using redis introduces some delay before quota usage is updated for display, so only
  # switch the provider to redis if you run into database connection spikes during
  # highly concurrent pushes to the same project; there is no improvement for other scenarios.
  quotaUpdateProvider: db # Or redis
  # Secret is used when core server communicates with other components.
  # If a secret key is not specified, Helm will generate one. Alternatively set existingSecret to use an existing secret
  # Must be a string of 16 chars.
  secret: ""
  # Fill in the name of a kubernetes secret if you want to use your own
  # If using existingSecret, the key must be secret
  existingSecret: ""
  # Fill in the name of a kubernetes secret if you want to use your own
  # TLS certificate and private key for token encryption/decryption.
  # The secret must contain keys named:
  # "tls.key" - the private key
  # "tls.crt" - the certificate
  secretName: ""
  # If not specifying a preexisting secret, a secret can be created from tokenKey and tokenCert and used instead.
  # If none of secretName, tokenKey, and tokenCert are specified, an ephemeral key and certificate will be autogenerated.
  # tokenKey and tokenCert must BOTH be set or BOTH unset.
  # The tokenKey value is formatted as a multiline string containing a PEM-encoded RSA key, indented one more than tokenKey on the following line.
  tokenKey: |
  # If tokenKey is set, the value of tokenCert must be set as a PEM-encoded certificate signed by tokenKey, and supplied as a multiline string, indented one more than tokenCert on the following line.
  tokenCert: |
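  # A hypothetical sketch of how tokenKey/tokenCert are supplied as multiline
  # PEM blocks (placeholder content only, not a real key or certificate):
  # tokenKey: |
  #   -----BEGIN RSA PRIVATE KEY-----
  #   <base64-encoded key data>
  #   -----END RSA PRIVATE KEY-----
  # tokenCert: |
  #   -----BEGIN CERTIFICATE-----
  #   <base64-encoded certificate data>
  #   -----END CERTIFICATE-----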
  # The XSRF key. Will be generated automatically if it isn't specified
  # If you specify it, please make sure it is 32 characters, otherwise there will be a validation issue at harbor-core runtime
  # https://github.com/goharbor/harbor/pull/21154
  xsrfKey: ""
  # If using existingSecret, the key is defined by core.existingXsrfSecretKey
  existingXsrfSecret: ""
  # If using existingSecret, the key
  existingXsrfSecretKey: CSRF_KEY
  # The time duration for asynchronously updating artifact pull_time and repository
  # pull_count; the unit is seconds. Will be 10 seconds if it isn't set.
  # eg. artifactPullAsyncFlushDuration: 10
  artifactPullAsyncFlushDuration:
  gdpr:
    deleteUser: false
    auditLogsCompliant: false

jobservice:
  image:
    repository: goharbor/harbor-jobservice
    tag: v2.14.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints:
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:
  # containers to be run before the controller's container starts.
  initContainers: []
  # Example:
  #
  # - name: wait
  #   image: busybox
  #   command: [ 'sh', '-c', "sleep 20" ]
  maxJobWorkers: 10
  # The logger for jobs: "file", "database" or "stdout"
  jobLoggers:
    - file
    # - database
    # - stdout
  # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
  loggerSweeperDuration: 14 #days
  notification:
    webhook_job_max_retry: 3
    webhook_job_http_client_timeout: 3 # in seconds
  reaper:
    # the max time to wait for a task to finish; if it is unfinished after max_update_hours, the task will be marked as error, but it will continue to run. Default value is 24
    max_update_hours: 24
    # the max time for an execution in running state without a new task created
    max_dangling_hours: 168
  # Secret is used when job service communicates with other components.
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # Use an existing secret resource
  existingSecret: ""
  # Key within the existing secret for the job service secret
  existingSecretKey: JOBSERVICE_SECRET

registry:
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.14.0
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    extraEnvVars: []
  controller:
    image:
      repository: goharbor/harbor-registryctl
      tag: v2.14.0
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    extraEnvVars: []
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:
  # containers to be run before the controller's container starts.
  initContainers: []
  # Example:
  #
  # - name: wait
  #   image: busybox
  #   command: [ 'sh', '-c', "sleep 20" ]
  # Secret is used to secure the upload state from client
  # and registry storage backend.
  # See: https://github.com/distribution/distribution/blob/release/2.8/docs/configuration.md#http
  # If a secret key is not specified, Helm will generate one.
  # Must be a string of 16 chars.
  secret: ""
  # Use an existing secret resource
  existingSecret: ""
  # Key within the existing secret for the registry service secret
  existingSecretKey: REGISTRY_HTTP_SECRET
  # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
  relativeurls: false
  credentials:
    username: "harbor_registry_user"
    password: "harbor_registry_password"
    # If using existingSecret, the keys must be REGISTRY_PASSWD and REGISTRY_HTPASSWD
    existingSecret: ""
    # Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt.
    # htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string
    htpasswdString: ""
  middleware:
    enabled: false
    type: cloudFront
    cloudFront:
      baseurl: example.cloudfront.net
      keypairid: KEYPAIRID
      duration: 3000s
      ipfilteredby: none
      # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
      # that allows access to CloudFront
      privateKeySecret: "my-secret"
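      # A minimal sketch of the referenced Secret (name and key data are placeholders;
      # CLOUDFRONT_KEY_DATA holds the encoded CloudFront private key):
      # apiVersion: v1
      # kind: Secret
      # metadata:
      #   name: my-secret
      # data:
      #   CLOUDFRONT_KEY_DATA: <base64-encoded private key>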
  # enable purge _upload directories
  upload_purging:
    enabled: true
    # remove files in _upload directories which exist for a period of time, default is one week.
    age: 168h
    # the interval of the purge operations
    interval: 24h
    dryrun: false

trivy:
  # enabled the flag to enable Trivy scanner
  enabled: true
  image:
    # repository the repository for Trivy adapter image
    repository: goharbor/trivy-adapter-photon
    # tag the tag for Trivy adapter image
    tag: v2.14.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  # replicas the number of Pod replicas
  replicas: 1
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 1
      memory: 1Gi
  extraEnvVars: []
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  ## The priority class to run the pod as
  priorityClassName:
  # containers to be run before the controller's container starts.
  initContainers: []
  # Example:
  #
  # - name: wait
  #   image: busybox
  #   command: [ 'sh', '-c', "sleep 20" ]
  # debugMode the flag to enable Trivy debug mode with more verbose scanning log
  debugMode: false
  # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
  vulnType: "os,library"
  # severity a comma-separated list of severities to be checked
  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
  # ignoreUnfixed the flag to display only fixed vulnerabilities
  ignoreUnfixed: false
  # insecure the flag to skip verifying the registry certificate
  insecure: false
  # gitHubToken the GitHub access token to download Trivy DB
  #
  # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
  # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
  # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
  # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
  # Currently, the database is updated every 12 hours and published as a new release to GitHub.
  #
  # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such a rate limit is enough
  # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
  # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
  # https://developer.github.com/v3/#rate-limiting
  #
  # You can create a GitHub token by following the instructions in
  # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
  gitHubToken: ""
  # skipUpdate the flag to disable Trivy DB downloads from GitHub
  #
  # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
  # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
  # `/home/scanner/.cache/trivy/db/trivy.db` path.
  skipUpdate: false
  # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
  # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
  skipJavaDBUpdate: false
  # The dbRepository and javaDBRepository flags can take multiple values, improving reliability when downloading databases.
  # Databases are downloaded in priority order until one is successful.
  # An attempt to download from the next repository is only made if a temporary error is received (e.g. status 429 or 5xx).
  #
  # OCI repository(ies) to retrieve the trivy vulnerability database in order of priority
  dbRepository:
    - "mirror.gcr.io/aquasec/trivy-db"
    - "ghcr.io/aquasecurity/trivy-db"
  # OCI repository(ies) to retrieve the Java trivy vulnerability database in order of priority
  javaDBRepository:
    - "mirror.gcr.io/aquasec/trivy-java-db"
    - "ghcr.io/aquasecurity/trivy-java-db"
  # The offlineScan option prevents Trivy from sending API requests to identify dependencies.
  #
  # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
  # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
  # exist in the local repositories. It means the number of detected vulnerabilities might be lower in offline mode.
  # It would work if all the dependencies are available locally.
  # This option doesn't affect DB downloads. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
  offlineScan: false
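  # A hypothetical air-gapped combination, per the notes above (the DB files must then
  # be mounted manually at the paths listed for skipUpdate/skipJavaDBUpdate):
  #   skipUpdate: true
  #   skipJavaDBUpdate: true
  #   offlineScan: true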
  # Comma-separated list of what security issues to detect. Defaults to `vuln`.
  securityCheck: "vuln"
  # The duration to wait for scan completion
  timeout: 5m0s

database:
  # if external database is used, set "type" to "external"
  # and fill the connection information in "external" section
  type: external
  internal:
    image:
      repository: goharbor/harbor-db
      tag: v2.14.0
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    # The timeout used in livenessProbe; 1 to 5 seconds
    livenessProbe:
      timeoutSeconds: 1
    # The timeout used in readinessProbe; 1 to 5 seconds
    readinessProbe:
      timeoutSeconds: 1
    extraEnvVars: []
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    # containers to be run before the controller's container starts.
    extrInitContainers: []
    # Example:
    #
    # - name: wait
    #   image: busybox
    #   command: [ 'sh', '-c', "sleep 20" ]
    # The initial superuser password for internal database
    password: "harbor"
    # The size limit for shared memory; pgSQL uses it for shared_buffer
    # For more details see:
    # https://github.com/goharbor/harbor/issues/15034
    shmSizeLimit: 512Mi
    initContainer:
      migrator: {}
      # resources:
      #  requests:
      #    memory: 128Mi
      #    cpu: 100m
      permissions: {}
      # resources:
      #  requests:
      #    memory: 128Mi
      #    cpu: 100m
  external:
    host: "postgres14.postgres.svc.cluster.local"
    port: "5432"
    username: "harbor"
    password: "harbor"
    coreDatabase: "harbor"
    # if using existing secret, the key must be "password"
    existingSecret: ""
    # "disable" - No SSL
    # "require" - Always SSL (skip verification)
    # "verify-ca" - Always SSL (verify that the certificate presented by the
    # server was signed by a trusted CA)
    # "verify-full" - Always SSL (verify that the certificate presented by the
    # server was signed by a trusted CA and the server host name matches the one
    # in the certificate)
    sslmode: "disable"
  # The maximum number of connections in the idle connection pool per pod (core+exporter).
  # If it <= 0, no idle connections are retained.
  maxIdleConns: 100
  # The maximum number of open connections to the database per pod (core+exporter).
  # If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for harbor's postgres.
  maxOpenConns: 900
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}

redis:
  # if external Redis is used, set "type" to "external"
  # and fill the connection information in "external" section
  type: internal
  internal:
    image:
      repository: goharbor/redis-photon
      tag: v2.14.0
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    extraEnvVars: []
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    # containers to be run before the controller's container starts.
    initContainers: []
    # Example:
    #
    # - name: wait
    #   image: busybox
    #   command: [ 'sh', '-c', "sleep 20" ]
    # # jobserviceDatabaseIndex defaults to "1"
    # # registryDatabaseIndex defaults to "2"
    # # trivyAdapterIndex defaults to "5"
    # # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
    # # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    trivyAdapterIndex: "5"
    # harborDatabaseIndex: "6"
    # cacheLayerDatabaseIndex: "7"
  external:
    # support redis, redis+sentinel
    # addr for redis: <host_redis>:<port_redis>
    # addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
    addr: "192.168.0.2:6379"
    # The name of the set of Redis instances to monitor, it must be set to support redis+sentinel
    sentinelMasterSet: ""
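    # A hypothetical redis+sentinel example following the addr format above
    # (hostnames, ports and master-set name are placeholders):
    #   addr: "sentinel0:26379,sentinel1:26379,sentinel2:26379"
    #   sentinelMasterSet: "mymaster"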
    # TLS configuration for redis connection
    # only server-authentication is supported, mTLS for redis connection is not supported
    # tls connection is disabled by default
    # Once `tlsOptions.enable` is set to true, a tls/ssl connection will be used for redis
    # Please set the `caBundleSecretName` in this configuration file, which contains the redis server rootCA if it is self-signed.
    # The secret must contain a key named "ca.crt" which will be injected into the trust store
    tlsOptions:
      enable: false
    # The "coreDatabaseIndex" must be "0" as the library Harbor
    # used doesn't support configuring it
    # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
    # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
    coreDatabaseIndex: "0"
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    trivyAdapterIndex: "5"
    # harborDatabaseIndex: "6"
    # cacheLayerDatabaseIndex: "7"
    # username field can be an empty string, and it will be authenticated against the default user
    username: ""
    password: ""
    # If using existingSecret, the key must be REDIS_PASSWORD; if ACL mode is enabled and the secret also includes the username, that key must be REDIS_USERNAME
    existingSecret: ""
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}

exporter:
  image:
    repository: goharbor/harbor-exporter
    tag: v2.14.0
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 1
  revisionHistoryLimit: 10
  # resources:
  #  requests:
  #    memory: 256Mi
  #    cpu: 100m
  extraEnvVars: []
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Spread Pods across failure-domains like regions, availability zones or nodes
  topologySpreadConstraints: []
  # - maxSkew: 1
  #   topologyKey: topology.kubernetes.io/zone
  #   nodeTaintsPolicy: Honor
  #   whenUnsatisfiable: DoNotSchedule
  ## The priority class to run the pod as
  priorityClassName:
  cacheDuration: 23
  cacheCleanInterval: 14400