initial commit

allard
2025-11-23 18:58:51 +01:00
commit 376a944abc
1553 changed files with 314731 additions and 0 deletions

dev/README.md Normal file

@@ -0,0 +1,8 @@
All configurations of the DEV cluster:
argocd crate elasticsearch-kibana kafka pgadmin postgres16 tekton
backstage defectdojo gitea kubernetes phpmyadmin prometheus traefik
camunda deptrack grafana mariadb portainer rabbitmq trivy
catalog-info.yaml dnsutils harbor nexus postgres13 redis zabbix
cockroachdb docs hercules nginx postgres14 redmine
cosign drupal itop olproperties postgres15 sonarqube

dev/argocd/.argocdignore Normal file

@@ -0,0 +1,2 @@
catalog-info.yaml
catalog-info.yml

dev/argocd/README.md Executable file

@@ -0,0 +1,120 @@
#Installation:
kubectl create ns argocd
#Create the certificate:
kubectl apply -f argocd-certificate.yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: argocd-tls-cert
  namespace: argocd
spec:
  secretName: argocd-tls-cert
  dnsNames:
    - argocd-dev.allarddcs.nl
  issuerRef:
    name: letsencrypt
    kind: ClusterIssuer
This creates a Certificate named "argocd-tls-cert":
NAME              READY   SECRET            AGE
argocd-tls-cert   True    argocd-tls-cert   76m
which is stored in a Secret "argocd-tls-cert":
NAME              TYPE                DATA   AGE
argocd-tls-cert   kubernetes.io/tls   2      76m
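The listings above can be reproduced with:
kubectl -n argocd get certificate argocd-tls-cert
kubectl -n argocd get secret argocd-tls-cert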
#Install via Helm
helm install argocd argo-cd/argo-cd -n argocd -f values.yaml
#values.yaml:
ingress:
  server:
    enabled: true
    ingressClassName: traefik
    hosts:
      - host: argocd-dev.allarddcs.nl
        paths:
          - "/"
    tls:
      - hosts:
          - argocd-dev.allarddcs.nl
        secretName: argocd-tls-cert
configs:
  params:
    # disable insecure (HTTP)
    server.insecure: "false"
server:
  tls:
    enabled: true
    # name of the TLS secret (created via cert-manager)
    secretName: argocd-tls-cert
This ensures that the previously created certificate is used by Argo CD and
that traffic is only possible over port 443.
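A quick way to verify this (assuming DNS for argocd-dev.allarddcs.nl points at the Traefik websecure entrypoint) is to inspect the served certificate and response headers:
curl -vkI https://argocd-dev.allarddcs.nl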
#ingressroutes:
- traffic passes through the LP cluster via an IngressRouteTCP with tls: passthrough: true.
- in the DEV cluster only the IngressRouteTCP is needed:
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
  name: argocd-route-tcp
  namespace: argocd
spec:
  entryPoints:
    - websecure
  routes:
    - match: HostSNI(`argocd-dev.allarddcs.nl`)
      priority: 10
      services:
        - name: argocd-server
          port: 443
    - match: HostSNI(`argocd-dev.allarddcs.nl`) && Headers(`Content-Type`, `application/grpc`)
      priority: 11
      services:
        - name: argocd-server
          port: 443
  tls:
    passthrough: true
Whether the second route is needed and actually works, I am not sure. In any case, Traefik does NOT terminate TLS.
#Retrieve the initial admin password:
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
#Connect the Gitea repository:
Check that the repository exists in Git.
project: default
https://gitea-dev.allarddcs.nl/AllardDCS/dev/olproperties (WITHOUT .git!!!)
user: allard
password: Gitea01@
#Add the application:
fill in the repository
add the path (olproperties), see the CLI example below
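A rough CLI equivalent of the UI steps above (a sketch; it assumes the Gitea repository is AllardDCS/dev with olproperties as a directory in it, and that the app should land in an olproperties namespace):
argocd app create olproperties \
  --repo https://gitea-dev.allarddcs.nl/AllardDCS/dev \
  --path olproperties \
  --dest-server https://kubernetes.default.svc \
  --dest-namespace olproperties \
  --project default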
#Test the API:
an argocd binary is available on pisvrwsv00
argocd login argocd-dev.allarddcs.nl
argocd app list
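Assuming the application was named olproperties, the sync that the task below automates can also be tried by hand:
argocd app sync olproperties
argocd app wait olproperties --health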
#Install the argocd-sync-and-wait task:
kubectl apply -f argocd-task-sync-and-wait.yaml
#Testing can be done with:
kubectl apply -f argocd-pipeline.yaml
kubectl create -f argocd-pipeline-run.yaml
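Following the run (a sketch; the actual PipelineRun name comes from argocd-pipeline-run.yaml):
kubectl get pipelineruns -w
kubectl logs -f -l tekton.dev/pipelineRun=<run-name> --all-containers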


@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: argocd-tls-cert
  namespace: argocd
spec:
  secretName: argocd-tls-cert
  dnsNames:
    - argocd-dev.allarddcs.nl
  issuerRef:
    name: letsencrypt
    kind: ClusterIssuer


@@ -0,0 +1,17 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: dev-argocd
  title: Argocd (dev)
  description: ArgoCD configuration
  annotations:
    argocd.argoproj.io/hook: Skip
    backstage.io/kubernetes-label-selector: "app=argocd"
spec:
  type: service
  owner: allarddcs
  subcomponentOf: component:default/DEV-cluster
  lifecycle: production
  docs:
    path: ./README.md


@@ -0,0 +1,15 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: argocd-http
  namespace: argocd
spec:
  entryPoints:
    - web
  routes:
    - kind: Rule
      match: Host("argocd-dev.allarddcs.nl")
      services:
        - kind: Service
          name: argocd-server
          port: 80


@@ -0,0 +1,26 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: argocd-tls
  namespace: argocd
spec:
  entryPoints:
    - websecure
  routes:
    - kind: Rule
      match: Host(`argocd-dev.allarddcs.nl`)
      priority: 10
      services:
        - kind: Service
          name: argocd-server
          port: 80
    # - kind: Rule
    #   match: Host(`argocd-dev.allarddcs.nl`) && Headers(`Content-Type`, `application/grpc`)
    #   priority: 11
    #   services:
    #     - kind: Service
    #       name: argocd-server
    #       port: 80
    #       scheme: h2c
  tls:
    certResolver: letsencrypt


@@ -0,0 +1,17 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: argocd-tls
  namespace: argocd
spec:
  entryPoints:
    - websecure
  routes:
    - kind: Rule
      match: Host(`argocd-dev.allarddcs.nl`)
      services:
        - kind: Service
          name: argocd-server
          port: 443
  tls:
    certResolver: letsencrypt


@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: argocd-web
  namespace: argocd
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`argocd-dev.allarddcs.nl`)
      kind: Rule
      services:
        - name: argocd-server
          port: 443
  tls:
    secretName: argocd-tls-cert


@@ -0,0 +1,22 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
  name: argocd-route-tcp
  namespace: argocd
spec:
  entryPoints:
    - websecure
  routes:
    - match: HostSNI(`argocd-dev.allarddcs.nl`)
      priority: 10
      services:
        - name: argocd-server
          port: 443
    - match: HostSNI(`argocd-dev.allarddcs.nl`) && Headers(`Content-Type`, `application/grpc`)
      priority: 11
      services:
        - name: argocd-server
          port: 443
  tls:
    passthrough: true


@@ -0,0 +1,46 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: argocd-http
  namespace: argocd
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`argocd.example.com`)
      kind: Rule
      middlewares:
        - name: redirect-to-https
      services:
        - name: argocd-server
          port: 80
          scheme: https
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: redirect-to-https
  namespace: argocd
spec:
  redirectScheme:
    scheme: https
    permanent: true
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: argocd-https
  namespace: argocd
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`argocd.example.com`)
      kind: Rule
      services:
        - name: argocd-server
          port: 80
          scheme: https
  tls:
    certResolver: letsencrypt

dev/argocd/values.org Normal file

File diff suppressed because it is too large.

dev/argocd/values.yaml Normal file

@@ -0,0 +1,25 @@
ingress:
  server:
    enabled: true
    ingressClassName: traefik
    hosts:
      - host: argocd-dev.allarddcs.nl
        paths:
          - "/"
    tls:
      - hosts:
          - argocd-dev.allarddcs.nl
        secretName: argocd-tls-cert
configs:
  params:
    # disable insecure (HTTP)
    server.insecure: "false"
server:
  tls:
    enabled: true
    # name of the TLS secret (created via cert-manager)
    secretName: argocd-tls-cert
# If you want HA, you can also configure replicas, etc.


@@ -0,0 +1,2 @@
catalog-info.yaml
catalog-info.yml

dev/backstage/README.md Normal file

@@ -0,0 +1,24 @@
#build container
setup.sh is a script that builds a Docker image from the Backstage Git repo, containing:
github, gitea, techdocs
#installation
kubectl apply -f backstage.yaml
connects to the postgres13 database
#after installation:
if the database connection does not work, check which connection parameters were loaded by running the following inside the container:
node -e "console.log(require('knex')({
client: 'pg',
connection: process.env.DATABASE_URL
}).raw('select 1+1'))"
If you then see "connection undefined", you know what's going on.
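Note that this snippet reads DATABASE_URL, while backstage.yaml only sets the individual POSTGRES_* variables, so "connection undefined" usually means that variable was never set. Printing the variables that did reach the container narrows it down:
node -e "console.log(process.env.POSTGRES_SERVICE_HOST, process.env.POSTGRES_SERVICE_PORT, process.env.POSTGRES_USER, process.env.POSTGRES_DB)"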


@@ -0,0 +1,16 @@
# backstage-private-users-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: backstage-private-users
  namespace: backstage
data:
  allardkrings.yaml: |
    apiVersion: backstage.io/v1alpha1
    kind: User
    metadata:
      name: AllardKrings # must match GitHub username
      email: admin@allarddcs.nl
    spec:
      memberOf:
        - team:AllardDCS


@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: backstage-secrets
  namespace: backstage
type: Opaque
data:
  GITEA_TOKEN: N2MyODlkODliMDI0ODk5ODRmYzk4NTA0MTFiYjI2ZjZlZTRlOWQzNw==


@@ -0,0 +1,109 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: backstage
namespace: backstage
labels:
backstage.io/kubernetes-id: backstage
spec:
replicas: 1
selector:
matchLabels:
app: backstage
template:
metadata:
labels:
app: backstage
backstage.io/kubernetes-id: backstage
spec:
serviceAccountName: backstage
containers:
- name: backstage
image: allardkrings/backstage:1.44.0
imagePullPolicy: Always
env:
- name: PORT
value: "7007"
- name: POSTGRES_USER
value: backstage
- name: POSTGRES_PASSWORD
value: backstage
- name: POSTGRES_DB
value: backstage
- name: POSTGRES_SERVICE_HOST
value: postgres13.postgres.svc.cluster.local
- name: POSTGRES_SERVICE_PORT
value: "5432"
- name: APP_CONFIG_auth_environment
value: development
- name: NODE_ENV
value: development
- name: GITHUB_TOKEN
valueFrom:
secretKeyRef:
name: github-token
key: GITHUB_TOKEN
- name: GITEA_TOKEN
valueFrom:
secretKeyRef:
name: gitea-token
key: GITEA_TOKEN
volumeMounts:
# Mount the configmap as a single file
- mountPath: /app/app-config.production.yaml
subPath: app-config.yaml
name: app-configmap
# Mount the PVC as the TechDocs storage directory
- mountPath: /tmp/techdocs-storage
name: techdocs-storage
- name: private-users
mountPath: /backstage/catalog/private-users
volumes:
# ConfigMap for app config
- name: app-configmap
configMap:
name: backstage-app-config
# PVC for TechDocs storage
- name: techdocs-storage
persistentVolumeClaim:
claimName: backstage-pvc
- name: private-users
configMap:
name: backstage-private-users
---
apiVersion: v1
kind: Service
metadata:
name: backstage
namespace: backstage
labels:
backstage.io/kubernetes-id: backstage
spec:
type: ClusterIP
selector:
app: backstage
ports:
- name: http
port: 7007
targetPort: 7007
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: backstage-tls
namespace: backstage
labels:
backstage.io/kubernetes-id: backstage
spec:
entryPoints:
- websecure
routes:
- match: Host(`backstage-dev.allarddcs.nl`)
kind: Rule
services:
- name: backstage
port: 7007
tls:
secretName: backstage-dev.allarddcs.nl-tls


@@ -0,0 +1,19 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: dev-backstage
  title: Backstage (dev)
  description: Backstage instance running in Kubernetes
  annotations:
    backstage.io/kubernetes-id: backstage
  links:
    - url: https://github.com/AllardKrings/kubernetes/dev/backstage
      title: backstage-configuration
  docs:
    - url: ./README.md
spec:
  type: service
  lifecycle: production
  owner: group:default/allarddcs
  subcomponentOf: component:default/DEV-cluster

dev/backstage/certificate.yaml Executable file

@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: backstage-dev.allarddcs.nl-tls
  namespace: backstage
spec:
  dnsNames:
    - backstage-dev.allarddcs.nl
  issuerRef:
    group: cert-manager.io
    kind: ClusterIssuer
    name: letsencrypt
  secretName: backstage-dev.allarddcs.nl-tls
  usages:
    - digital signature
    - key encipherment


@@ -0,0 +1,75 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: backstage
namespace: backstage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: backstage-k8s-reader
rules:
- apiGroups: [""]
resources:
- pods
- services
- configmaps
- namespaces
- endpoints
- limitranges
- resourcequotas
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources:
- deployments
- replicasets
- statefulsets
- daemonsets
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources:
- jobs
- cronjobs
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources:
- ingresses
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling"]
resources:
- horizontalpodautoscalers
verbs: ["get", "list", "watch"]
- apiGroups: ["metrics.k8s.io"]
resources:
- pods
verbs: ["get", "list"]
- apiGroups: ["traefik.containo.us"]
resources:
- ingressroutes
- ingressroutetcps
- ingressrouteudps
- middlewares
- middlewarestraefikio
- tlsoptions
- tlsstores
- traefikservices
- serverstransports
verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: backstage-k8s-reader-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: backstage-k8s-reader
subjects:
- kind: ServiceAccount
name: backstage
namespace: backstage


@@ -0,0 +1,143 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: backstage-app-config
namespace: backstage
data:
app-config.yaml: |
app:
title: Backstage AllardDCS
baseUrl: https://backstage-dev.allarddcs.nl
extensions:
- entity-content:kubernetes/kubernetes
backend:
baseUrl: https://backstage-dev.allarddcs.nl
env:
PATH: /usr/local/bin:$PATH
listen:
port: 7007
cors:
origin: https://backstage-dev.allarddcs.nl
methods: [GET, POST, PUT, DELETE, PATCH]
credentials: true
csp:
connect-src: ["'self'", "http:", "https:"]
database:
client: pg
connection:
host: ${POSTGRES_SERVICE_HOST}
port: ${POSTGRES_SERVICE_PORT}
user: ${POSTGRES_USER}
password: ${POSTGRES_PASSWORD}
database: ${POSTGRES_DB}
reading:
allow:
- host: raw.githubusercontent.com
paths:
- /
cache:
memory: {}
trustProxy: true
log:
level: debug
logging:
logLevel: info
loggers:
catalog:
level: debug
backend:
level: debug
techdocs:
builder: 'local'
publisher:
type: 'local'
generator:
runIn: local
organization:
name: AllardDCS
permission:
rules:
- allow:
users:
- AllardKrings
integrations:
gitea:
- host: gitea-dev.allarddcs.nl
baseUrl: https://gitea-dev.allarddcs.nl
apiBaseUrl: https://gitea-dev.allarddcs.nl/api/v1
token:
$env: GITEA_TOKEN
github:
- host: github.com
token:
$env: GITHUB_TOKEN
catalog:
providers:
github:
myGithub:
organization: 'AllardKrings'
catalogPath: '/**/catalog-info.yaml'
filters:
branch: 'master'
repository: 'kubernetes'
schedule:
frequency: { minutes: 30 }
timeout: { minutes: 3 }
gitea:
myGitea:
organization: 'allarddcs'
host: gitea-dev.allarddcs.nl
branch: 'master'
catalogPath: '/**/catalog-info.yaml'
schedule:
frequency: { minutes: 30 }
timeout: { minutes: 3 }
locations:
- type: url
target: https://gitea-dev.allarddcs.nl/AllardDCS/kubernetes/raw/branch/master/group.yaml
rules:
- allow: [Group]
- type: file
target: /backstage/catalog/private-users/allardkrings.yaml
rules:
- allow: [User]
processors:
gitea:
- host: gitea-dev.allarddcs.nl
apiBaseUrl: https://gitea-dev.allarddcs.nl/api/v1
kubernetes:
serviceLocatorMethod:
type: multiTenant
clusterLocatorMethods:
- type: config
clusters:
- name: local-cluster
url: https://kubernetes.default.svc
authProvider: serviceAccount
auth:
# see https://backstage.io/docs/auth/ to learn about auth providers
environment: development
providers:
# See https://backstage.io/docs/auth/guest/provider
guest: {}
github:
development:
clientId: Ov23lilVTWftNp9vMFwB
clientSecret: a687566a8d4871d30fe0126f150515531969d5fc
usePopup: false
signIn:
resolvers:
# Matches the GitHub username with the Backstage user entity name.
# See https://backstage.io/docs/auth/github/provider#resolvers for more resolvers.
- resolver: usernameMatchingUserEntityName


@@ -0,0 +1,2 @@
microk8s kubectl create secret generic gitea-token -n backstage \
--from-literal=GITEA_TOKEN=7c289d89b02489984fc9850411bb26f6ee4e9d37
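# backstage.yaml also references a github-token secret with key GITHUB_TOKEN;
# assuming it is created the same way (the token value is a placeholder):
microk8s kubectl create secret generic github-token -n backstage \
  --from-literal=GITHUB_TOKEN=<github-token>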


@@ -0,0 +1 @@
7c289d89b02489984fc9850411bb26f6ee4e9d37


@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: postgres-secrets
namespace: backstage
type: Opaque
data:
POSTGRES_USER: YmFja3N0YWdlCg==
POSTGRES_PASSWORD: YmFja3N0YWdlCg==

dev/backstage/pvc.yaml Normal file

@@ -0,0 +1,34 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: backstage-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/backstage/dev
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: backstage-pvc
namespace: backstage
spec:
storageClassName: ""
volumeName: backstage-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

dev/backstage/restart.sh Executable file

@@ -0,0 +1,3 @@
microk8s kubectl apply -f configmap.yaml
microk8s kubectl rollout restart deploy/backstage -n backstage
microk8s kubectl get pod -n backstage


@@ -0,0 +1,222 @@
#!/bin/bash
set -e
# ------------------------
# Load NVM
# ------------------------
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
# Use Node 20
nvm use 20
# ------------------------
# Configuration
# ------------------------
APP_NAME="backstage"
APP_DIR="$PWD/$APP_NAME"
echo "=== 1. Creating Backstage app ==="
# Use --ignore-existing to avoid cached/missing binaries
npx --ignore-existing @backstage/create-app@latest "$APP_DIR"
cd "$APP_DIR"
echo "=== 2. Bumping Backstage version to 1.42.1 ==="
yarn backstage-cli versions:bump --release 1.42.1
echo "=== 3. Installing plugin dependencies (Yarn 4 compatible) ==="
# Backend plugins
yarn --cwd packages/backend add \
@backstage/plugin-techdocs-backend \
@backstage/plugin-catalog-backend-module-github \
@backstage/plugin-catalog-backend-module-gitea \
@backstage/plugin-devtools-backend
# Frontend plugins
yarn --cwd packages/app add \
@backstage/plugin-techdocs \
@backstage/plugin-catalog \
@backstage/plugin-catalog-graph \
@backstage/plugin-techdocs-module-addons-contrib
echo "=== 4. Patching backend/src/index.ts ==="
BACKEND_FILE=packages/backend/src/index.ts
cat > "$BACKEND_FILE" <<'EOF'
import { createBackend } from '@backstage/backend-defaults';
import { createBackendFeatureLoader } from '@backstage/backend-plugin-api';
const backend = createBackend();
// Catalog
backend.add(import('@backstage/plugin-catalog-backend'));
backend.add(import('@backstage/plugin-catalog-backend-module-scaffolder-entity-model'));
backend.add(import('@backstage/plugin-catalog-backend-module-unprocessed'));
backend.add(import('@backstage/plugin-catalog-backend-module-github'));
backend.add(import('@backstage/plugin-catalog-backend-module-gitea'));
backend.add(import('@backstage/plugin-devtools-backend'));
// Scaffolder
backend.add(import('@backstage/plugin-scaffolder-backend'));
backend.add(import('@backstage/plugin-scaffolder-backend-module-github'));
backend.add(import('@backstage/plugin-scaffolder-backend-module-notifications'));
// Auth
backend.add(import('@backstage/plugin-auth-backend'));
backend.add(import('@backstage/plugin-auth-backend-module-guest-provider'));
// TechDocs
backend.add(import('@backstage/plugin-techdocs-backend'));
// Kubernetes
backend.add(import('@backstage/plugin-kubernetes-backend'));
// Search
const searchLoader = createBackendFeatureLoader({
*loader() {
yield import('@backstage/plugin-search-backend');
yield import('@backstage/plugin-search-backend-module-catalog');
yield import('@backstage/plugin-search-backend-module-techdocs');
},
});
backend.add(searchLoader);
// Misc
backend.add(import('@backstage/plugin-devtools-backend'));
backend.add(import('@backstage/plugin-app-backend'));
backend.add(import('@backstage/plugin-proxy-backend'));
backend.add(import('@backstage/plugin-permission-backend'));
backend.add(import('@backstage/plugin-permission-backend-module-allow-all-policy'));
backend.add(import('@backstage/plugin-notifications-backend'));
backend.add(import('@backstage/plugin-events-backend'));
backend.start();
EOF
echo "✓ Backend patched."
echo "=== 5. Patching packages/app/src/App.tsx ==="
APP_FILE=packages/app/src/App.tsx
cat > "$APP_FILE" <<'EOF'
import React from 'react';
import { createApp } from '@backstage/app-defaults';
import { FlatRoutes } from '@backstage/core-app-api';
import { Route, Navigate } from 'react-router-dom';
import { CatalogIndexPage, CatalogEntityPage } from '@backstage/plugin-catalog';
import { CatalogGraphPage } from '@backstage/plugin-catalog-graph';
import { ApiExplorerPage } from '@backstage/plugin-api-docs';
import { TechDocsIndexPage, TechDocsReaderPage } from '@backstage/plugin-techdocs';
import { ScaffolderPage } from '@backstage/plugin-scaffolder';
import { SearchPage } from '@backstage/plugin-search';
import { UserSettingsPage } from '@backstage/plugin-user-settings';
const app = createApp();
const routes = (
<FlatRoutes>
<Route path="/" element={<Navigate to="/catalog" />} />
<Route path="/catalog" element={<CatalogIndexPage />} />
<Route path="/catalog/:namespace/:kind/:name" element={<CatalogEntityPage />} />
<Route path="/catalog-graph" element={<CatalogGraphPage />} />
<Route path="/api-docs" element={<ApiExplorerPage />} />
<Route path="/docs" element={<TechDocsIndexPage />} />
<Route path="/docs/:namespace/:kind/:name/*" element={<TechDocsReaderPage />} />
<Route path="/search" element={<SearchPage />} />
<Route path="/create" element={<ScaffolderPage />} />
<Route path="/settings" element={<UserSettingsPage />} />
</FlatRoutes>
);
export default app.createRoot(routes);
EOF
echo "✓ App.tsx patched."
echo "=== 6. Installing all dependencies ==="
# Yarn 4 uses --immutable instead of --frozen-lockfile
yarn install --immutable
echo "=== 7. Building backend artifacts ==="
yarn workspace backend build
# Verify the build output
if [ ! -f packages/backend/dist/bundle.tar.gz ] || [ ! -f packages/backend/dist/skeleton.tar.gz ]; then
echo "❌ Backend build failed: required files not found!"
exit 1
fi
echo "✓ Backend build complete."
# -----------------------------
# 8a. Patch backend Dockerfile to include TechDocs/MkDocs + Yarn 4 support
# -----------------------------
DOCKERFILE=packages/backend/Dockerfile
cat > "$DOCKERFILE" <<'EOF'
# This dockerfile builds an image for the backend package.
# It should be executed with the root of the repo as docker context.
#
# Before building this image, be sure to have run the following commands in the repo root:
#
# yarn install
# yarn tsc
# yarn build:backend
#
# Once the commands have been run, you can build the image using `yarn build-image`
FROM node:20-bookworm-slim
# Install sqlite3 dependencies. You can skip this if you don't use sqlite3 in the image,
# in which case you should also move better-sqlite3 to "devDependencies" in package.json.
# Additionally, we install dependencies for `techdocs.generator.runIn: local`.
# https://backstage.io/docs/features/techdocs/getting-started#disabling-docker-in-docker-situation-optional
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && \
apt-get install -y --no-install-recommends libsqlite3-dev python3 python3-pip python3-venv build-essential && \
yarn config set python /usr/bin/python3
# Set up a virtual environment for mkdocs-techdocs-core.
ENV VIRTUAL_ENV=/opt/venv
RUN python3 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN pip3 install mkdocs-techdocs-core==1.1.7
# From here on we use the least-privileged `node` user to run the backend.
WORKDIR /app
RUN chown node:node /app
USER node
# This switches many Node.js dependencies to production mode.
ENV NODE_ENV=production
# Copy over Yarn 3 configuration, release, and plugins
COPY --chown=node:node .yarn ./.yarn
COPY --chown=node:node .yarnrc.yml ./
# Copy repo skeleton first, to avoid unnecessary docker cache invalidation.
# The skeleton contains the package.json of each package in the monorepo,
# and along with yarn.lock and the root package.json, that's enough to run yarn install.
COPY --chown=node:node yarn.lock package.json packages/backend/dist/skeleton.tar.gz ./
RUN tar xzf skeleton.tar.gz && rm skeleton.tar.gz
RUN --mount=type=cache,target=/home/node/.yarn/berry/cache,sharing=locked,uid=1000,gid=1000 \
yarn workspaces focus --all --production
# Then copy the rest of the backend bundle, along with any other files we might want.
COPY --chown=node:node packages/backend/dist/bundle.tar.gz app-config*.yaml ./
RUN tar xzf bundle.tar.gz && rm bundle.tar.gz
CMD ["node", "packages/backend", "--config", "app-config.yaml"]
EOF
echo "✓ Backend Dockerfile patched with TechDocs + Yarn 4 support."
echo "=== 8. Building backend Docker image ==="
yarn workspace backend build-image
echo "✅ Backstage 1.42.1 setup complete with TechDocs!"
echo "Run with: docker run -p 7007:7007 <image_name>"
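# Hypothetical follow-up (not executed by this script): backstage.yaml pulls
# allardkrings/backstage:<tag>, so the image built above still has to be tagged
# and pushed before the deployment can use it, e.g.:
#   docker tag <image_name> allardkrings/backstage:1.44.0
#   docker push allardkrings/backstage:1.44.0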

dev/backstage/setup.sh Executable file

@@ -0,0 +1,178 @@
#!/usr/bin/env bash
set -euo pipefail
# ------------------------
# Configuration
# ------------------------
APP_NAME="backstage"
APP_DIR="$PWD/$APP_NAME"
BACKSTAGE_RELEASE="1.42.1"
NODE_VERSION_MIN=18
echo
echo "=== Backstage automated setup script ==="
echo "App dir: $APP_DIR"
echo "Target Backstage release: $BACKSTAGE_RELEASE"
echo
# Quick environment checks
command -v node >/dev/null 2>&1 || echo "Warning: node not found (need >= ${NODE_VERSION_MIN})"
command -v yarn >/dev/null 2>&1 || echo "Warning: yarn not found"
# ------------------------
# 1) Create Backstage app
# ------------------------
if [ -d "$APP_DIR" ]; then
echo "Directory $APP_DIR already exists — aborting to avoid overwriting."
exit 1
fi
echo "=== 1) Creating Backstage app ==="
npx --ignore-existing @backstage/create-app@latest "$APP_DIR"
cd "$APP_DIR"
# ------------------------
# 2) Bump Backstage versions
# ------------------------
echo "=== 2) Bumping Backstage packages to release $BACKSTAGE_RELEASE ==="
yarn backstage-cli versions:bump --release "$BACKSTAGE_RELEASE"
# ------------------------
# 3) Install backend plugins
# ------------------------
echo "=== 3) Installing backend plugins ==="
yarn --cwd packages/backend add \
@backstage/plugin-catalog-backend \
@backstage/plugin-catalog-backend-module-scaffolder-entity-model \
@backstage/plugin-catalog-backend-module-unprocessed \
@backstage/plugin-catalog-backend-module-github \
@backstage/plugin-catalog-backend-module-gitea \
@backstage/plugin-scaffolder-backend \
@backstage/plugin-scaffolder-backend-module-github \
@backstage/plugin-scaffolder-backend-module-notifications \
@backstage/plugin-auth-backend \
@backstage/plugin-techdocs-backend \
@backstage/plugin-kubernetes-backend \
@backstage/plugin-devtools-backend \
@backstage/plugin-app-backend \
@backstage/plugin-proxy-backend \
@backstage/plugin-permission-backend \
@backstage/plugin-permission-backend-module-allow-all-policy \
@backstage/plugin-notifications-backend \
@backstage/plugin-events-backend \
@backstage/plugin-search-backend \
@backstage/plugin-search-backend-module-catalog \
@backstage/plugin-search-backend-module-techdocs
# ------------------------
# 4) Install frontend plugins
# ------------------------
echo "=== 4) Installing frontend plugins ==="
yarn --cwd packages/app add \
@backstage/plugin-catalog \
@backstage/plugin-catalog-graph \
@backstage/plugin-catalog-import \
@backstage/plugin-techdocs \
@backstage/plugin-techdocs-module-addons-contrib \
@backstage/plugin-scaffolder \
@backstage/plugin-user-settings \
@backstage/plugin-search \
@backstage/plugin-api-docs \
@backstage/plugin-org
# ------------------------
# 5) Patch backend index.ts with static imports
# ------------------------
echo "=== 5) Patching backend index.ts ==="
BACKEND_FILE="packages/backend/src/index.ts"
mkdir -p "$(dirname "$BACKEND_FILE")"
cat > "$BACKEND_FILE" <<'EOF'
import { createBackend } from '@backstage/backend-defaults';
import { createBackendFeatureLoader } from '@backstage/backend-plugin-api';
import appBackend from '@backstage/plugin-app-backend';
import catalogBackend from '@backstage/plugin-catalog-backend';
import catalogScaffolderEntityModel from '@backstage/plugin-catalog-backend-module-scaffolder-entity-model';
import catalogUnprocessed from '@backstage/plugin-catalog-backend-module-unprocessed';
import catalogGithub from '@backstage/plugin-catalog-backend-module-github';
import catalogGitea from '@backstage/plugin-catalog-backend-module-gitea';
import scaffolderBackend from '@backstage/plugin-scaffolder-backend';
import scaffolderGithub from '@backstage/plugin-scaffolder-backend-module-github';
import scaffolderNotifications from '@backstage/plugin-scaffolder-backend-module-notifications';
import authBackend from '@backstage/plugin-auth-backend';
import techdocsBackend from '@backstage/plugin-techdocs-backend';
import kubernetesBackend from '@backstage/plugin-kubernetes-backend';
import devtoolsBackend from '@backstage/plugin-devtools-backend';
import proxyBackend from '@backstage/plugin-proxy-backend';
import permissionBackend from '@backstage/plugin-permission-backend';
import allowAllPolicy from '@backstage/plugin-permission-backend-module-allow-all-policy';
import notificationsBackend from '@backstage/plugin-notifications-backend';
import eventsBackend from '@backstage/plugin-events-backend';
const backend = createBackend();
backend.add(appBackend);
backend.add(catalogBackend);
backend.add(catalogScaffolderEntityModel);
backend.add(catalogUnprocessed);
backend.add(catalogGithub);
backend.add(catalogGitea);
backend.add(scaffolderBackend);
backend.add(scaffolderGithub);
backend.add(scaffolderNotifications);
backend.add(authBackend);
backend.add(techdocsBackend);
backend.add(kubernetesBackend);
backend.add(devtoolsBackend);
backend.add(proxyBackend);
backend.add(permissionBackend);
backend.add(allowAllPolicy);
backend.add(notificationsBackend);
backend.add(eventsBackend);
const searchLoader = createBackendFeatureLoader({
*loader() {
yield import('@backstage/plugin-search-backend');
yield import('@backstage/plugin-search-backend-module-catalog');
yield import('@backstage/plugin-search-backend-module-techdocs');
},
});
backend.add(searchLoader);
backend.start();
EOF
echo "✓ Backend index.ts patched."
# ------------------------
# 6) Do NOT overwrite App.tsx
# ------------------------
echo "=== 6) Preserving existing App.tsx ==="
# ------------------------
# 7) Install workspace dependencies
# ------------------------
echo "=== 7) Installing workspace dependencies ==="
yarn install
# ------------------------
# 8) Build backend bundle
# ------------------------
echo "=== 8) Building backend bundle ==="
yarn workspace backend build
# ------------------------
# 9) Build Docker image
# ------------------------
echo "=== 9) Building backend Docker image ==="
yarn workspace backend build-image
echo "=== DONE ==="
echo "Backstage app created at: $APP_DIR"
echo "Docker image built successfully. Run with: docker run -p 7007:7007 <image_name>"

dev/backstage/setup3.sh Executable file

@@ -0,0 +1,205 @@
#!/usr/bin/env bash
set -euo pipefail
# ------------------------
# Configuration
# ------------------------
APP_NAME="backstage"
APP_DIR="$PWD/$APP_NAME"
BACKSTAGE_RELEASE="1.42.1"
NODE_VERSION_MIN=18
echo
echo "=== Backstage automated setup script ==="
echo "App dir: $APP_DIR"
echo "Target Backstage release: $BACKSTAGE_RELEASE"
echo
# Quick environment checks
command -v node >/dev/null 2>&1 || echo "Warning: node not found (need >= ${NODE_VERSION_MIN})"
command -v yarn >/dev/null 2>&1 || echo "Warning: yarn not found"
# ------------------------
# 1) Create Backstage app
# ------------------------
if [ -d "$APP_DIR" ]; then
echo "Directory $APP_DIR already exists — aborting to avoid overwriting."
exit 1
fi
echo "=== 1) Creating Backstage app ==="
npx --ignore-existing @backstage/create-app@latest "$APP_DIR"
cd "$APP_DIR"
# ------------------------
# 2) Bump Backstage versions
# ------------------------
echo "=== 2) Bumping Backstage packages to release $BACKSTAGE_RELEASE ==="
yarn backstage-cli versions:bump --release "$BACKSTAGE_RELEASE"
# ------------------------
# 3) Install backend plugins
# ------------------------
echo "=== 3) Installing backend plugins ==="
yarn --cwd packages/backend add \
@backstage/plugin-catalog-backend \
@backstage/plugin-catalog-backend-module-scaffolder-entity-model \
@backstage/plugin-catalog-backend-module-unprocessed \
@backstage/plugin-catalog-backend-module-github \
@backstage/plugin-catalog-backend-module-gitea \
@backstage/plugin-scaffolder-backend \
@backstage/plugin-scaffolder-backend-module-github \
@backstage/plugin-scaffolder-backend-module-notifications \
@backstage/plugin-auth-backend \
@backstage/plugin-auth-backend-module-guest-provider \
@backstage/plugin-techdocs-backend \
@backstage/plugin-kubernetes-backend \
@backstage/plugin-devtools-backend \
@backstage/plugin-app-backend \
@backstage/plugin-proxy-backend \
@backstage/plugin-permission-backend \
@backstage/plugin-permission-backend-module-allow-all-policy \
@backstage/plugin-notifications-backend \
@backstage/plugin-events-backend \
@backstage/plugin-search-backend \
@backstage/plugin-search-backend-module-catalog \
@backstage/plugin-search-backend-module-techdocs
# ------------------------
# 4) Install frontend plugins
# ------------------------
echo "=== 4) Installing frontend plugins ==="
yarn --cwd packages/app add \
@backstage/plugin-catalog \
@backstage/plugin-catalog-graph \
@backstage/plugin-catalog-import \
@backstage/plugin-techdocs \
@backstage/plugin-techdocs-module-addons-contrib \
@backstage/plugin-scaffolder \
@backstage/plugin-user-settings \
@backstage/plugin-search \
@backstage/plugin-api-docs \
@backstage/plugin-org
# ------------------------
# 5) Patch backend index.ts with static imports
# ------------------------
echo "=== 5) Patching backend index.ts ==="
BACKEND_FILE="packages/backend/src/index.ts"
mkdir -p "$(dirname "$BACKEND_FILE")"
cat > "$BACKEND_FILE" <<'EOF'
import { createBackend } from '@backstage/backend-defaults';
import { createBackendFeatureLoader } from '@backstage/backend-plugin-api';
import appBackend from '@backstage/plugin-app-backend';
import catalogBackend from '@backstage/plugin-catalog-backend';
import catalogScaffolderEntityModel from '@backstage/plugin-catalog-backend-module-scaffolder-entity-model';
import catalogUnprocessed from '@backstage/plugin-catalog-backend-module-unprocessed';
import catalogGithub from '@backstage/plugin-catalog-backend-module-github';
import catalogGitea from '@backstage/plugin-catalog-backend-module-gitea';
import scaffolderBackend from '@backstage/plugin-scaffolder-backend';
import scaffolderGithub from '@backstage/plugin-scaffolder-backend-module-github';
import scaffolderNotifications from '@backstage/plugin-scaffolder-backend-module-notifications';
import authBackend from '@backstage/plugin-auth-backend';
import guestProvider from '@backstage/plugin-auth-backend-module-guest-provider';
import techdocsBackend from '@backstage/plugin-techdocs-backend';
import kubernetesBackend from '@backstage/plugin-kubernetes-backend';
import devtoolsBackend from '@backstage/plugin-devtools-backend';
import proxyBackend from '@backstage/plugin-proxy-backend';
import permissionBackend from '@backstage/plugin-permission-backend';
import allowAllPolicy from '@backstage/plugin-permission-backend-module-allow-all-policy';
import notificationsBackend from '@backstage/plugin-notifications-backend';
import eventsBackend from '@backstage/plugin-events-backend';
const backend = createBackend();
backend.add(appBackend);
backend.add(catalogBackend);
backend.add(catalogScaffolderEntityModel);
backend.add(catalogUnprocessed);
backend.add(catalogGithub);
backend.add(catalogGitea);
backend.add(scaffolderBackend);
backend.add(scaffolderGithub);
backend.add(scaffolderNotifications);
backend.add(authBackend);
backend.add(guestProvider);
backend.add(techdocsBackend);
backend.add(kubernetesBackend);
backend.add(devtoolsBackend);
backend.add(proxyBackend);
backend.add(permissionBackend);
backend.add(allowAllPolicy);
backend.add(notificationsBackend);
backend.add(eventsBackend);
const searchLoader = createBackendFeatureLoader({
*loader() {
yield import('@backstage/plugin-search-backend');
yield import('@backstage/plugin-search-backend-module-catalog');
yield import('@backstage/plugin-search-backend-module-techdocs');
},
});
backend.add(searchLoader);
backend.start();
EOF
echo "✓ Backend index.ts patched."
# ------------------------
# 6) Do NOT overwrite App.tsx
# ------------------------
echo "=== 6) Preserving existing App.tsx ==="
# ------------------------
# 7) Install workspace dependencies
# ------------------------
echo "=== 7) Installing workspace dependencies ==="
yarn install
# ------------------------
# 8) Build backend bundle
# ------------------------
echo "=== 8) Building backend bundle ==="
yarn workspace backend build
# ------------------------
# 9) Patch backend Dockerfile for TechDocs
# ------------------------
DOCKERFILE="packages/backend/Dockerfile"
echo "=== Patching backend Dockerfile for TechDocs mkdocs ==="
# Insert mkdocs virtualenv only if not already patched
if ! grep -q "VIRTUAL_ENV=/opt/venv" "$DOCKERFILE"; then
cat >> "$DOCKERFILE" <<'EOF'
# --- TechDocs MkDocs virtualenv ---
USER root
RUN apt-get update && apt-get install -y python3 python3-pip python3-venv git build-essential && rm -rf /var/lib/apt/lists/*
ENV VIRTUAL_ENV=/opt/venv
RUN python3 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN pip3 install mkdocs-techdocs-core mkdocs-awesome-pages-plugin
USER node
EOF
echo "✓ Dockerfile patched with mkdocs virtualenv"
else
echo "✓ Dockerfile already patched"
fi
# ------------------------
# 10) Build Docker image
# ------------------------
echo "=== 10) Building backend Docker image ==="
yarn workspace backend build-image
echo "=== DONE ==="
echo "Backstage app created at: $APP_DIR"
echo "Docker image built successfully. Run with: docker run -p 7007:7007 <image_name>"

dev/camunda/README.md Executable file

@@ -0,0 +1,2 @@
userid: demo
password: demo

dev/camunda/camunda.yaml Executable file

@@ -0,0 +1,52 @@
apiVersion: v1
kind: Service
metadata:
name: camunda
namespace: camunda
spec:
selector:
app: camunda
ports:
- name: http
port: 8080
targetPort: 8080
type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: camunda
namespace: camunda
spec:
replicas: 1
selector:
matchLabels:
app: camunda
template:
metadata:
labels:
app: camunda
spec:
containers:
- name: camunda
image: allardkrings/camunda7-arm64v8
ports:
- containerPort: 8080
imagePullPolicy: IfNotPresent
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: camunda-tls
namespace: camunda
spec:
entryPoints:
- websecure
routes:
- match: Host(`camunda-prod.allarddcs.nl`)
kind: Rule
services:
- name: camunda
port: 8080
tls:
certResolver: letsencrypt


@@ -0,0 +1,17 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: dev-camunda
  title: Camunda (dev)
  annotations:
    backstage.io/kubernetes-id: camunda
  links:
    - url: https://github.com/AllardKrings/kubernetes/dev/camunda
      title: camunda-configuration
  docs:
    - url: ./README.md
spec:
  type: service
  lifecycle: production
  owner: allarddcs
  subcomponentOf: component:default/DEV-cluster

dev/catalog-info.yaml Normal file

@@ -0,0 +1,60 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: DEV-cluster
namespace: default
description: deployments DEV-cluster
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://github.com/AllardKrings/kubernetes/dev/
title: AllardDCS DEV-cluster
docs:
- url: ./README.md
spec:
type: service
lifecycle: production
owner: group:default/allarddcs
children:
- component:default/dev-argocd
- component:default/dev-backstage
- component:default/dev-camunda
- component:default/dev-cockroachdb
- component:default/dev-cosign
- component:default/dev-crate
- component:default/dev-defectdojo
- component:default/dev-deptrack
- component:default/dev-dnsutils
- component:default/dev-docs
- component:default/dev-drupal
- component:default/dev-elasticsearch-kibana
- component:default/dev-gitea
- component:default/dev-grafana
- component:default/dev-harbor
- component:default/dev-hercules
- component:default/dev-itop
- component:default/dev-kafka
- component:default/dev-kubernetes
- component:default/dev-mariadb
- component:default/dev-nexus
- component:default/dev-nginx
- component:default/dev-olproperties
- component:default/dev-pgadmin
- component:default/dev-phpmyadmin
- component:default/dev-portainer
- component:default/dev-postgres13
- component:default/dev-postgres14
- component:default/dev-postgres15
- component:default/dev-postgres16
- component:default/dev-prometheus
- component:default/dev-rabbitmq
- component:default/dev-redis
- component:default/dev-redmine
- component:default/dev-sonarqube
- component:default/dev-tekton
- component:default/dev-traefik
- component:default/dev-trivy
- component:default/dev-zabbix

dev/cockroachdb/README.md Normal file

@@ -0,0 +1,26 @@
#Installation:
#apply the CRD:
kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v2.14.0/install/crds.yaml
#install cockroachdb:
kubectl apply -f cockroachdb.yaml
#Initialize the cluster:
kubectl exec -it cockroachdb-0 \
-- /cockroach/cockroach init \
--certs-dir=/cockroach/cockroach-certs
#Log in with the client:
kubectl exec -it cockroachdb-client-secure \
-- ./cockroach sql \
--certs-dir=/cockroach-certs \
--host=cockroachdb-public
#Create a user:
CREATE USER roach WITH PASSWORD 'Cockroach01@';
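#Grant access (example; appdb is a placeholder database name):
kubectl exec -it cockroachdb-client-secure \
  -- ./cockroach sql \
  --certs-dir=/cockroach-certs \
  --host=cockroachdb-public \
  --execute="CREATE DATABASE appdb; GRANT ALL ON DATABASE appdb TO roach;"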


@@ -0,0 +1,10 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: dev-cockroachdb
title: Cockroachdb (dev)
spec:
type: service
lifecycle: production
owner: group:default/allarddcs
subcomponentOf: component:default/DEV-cluster


@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDJTCCAg2gAwIBAgIQa/0mCEqslZ2d107ceEr9ATANBgkqhkiG9w0BAQsFADAr
MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y
NTAxMjUyMDIzNDRaFw0zNTAyMDMyMDIzNDRaMCsxEjAQBgNVBAoTCUNvY2tyb2Fj
aDEVMBMGA1UEAxMMQ29ja3JvYWNoIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAvBJOTewyeYeWUncc7wx27bRCaDH7YawGyaltYypUzo93li+8K5Uw
VSYfy3mxNp47IQXebDPCQITct5pGq/EBTrWGJ/MLf8ZcCfPvvzylsqsesFFfS5y0
sYof+JzyowDOJflWsQnJLIK5kD32fvupvc0dKY8q/4WN/Ra1kiUm6ZcFYWVKJx2s
2ZVWcDP5xh+obCgP3F4cTsLjo1mkoRPMSLw5w9M5x3AiDgi6zwkcw9aUVq0lBciA
lI4cAHC4Awc1AP3OazYV/E+cC6dtzS+55KRGQIYOp/pkgBKsTAd2ahuZTh8ZWXyS
p30X0luRUO9wBksGEt5ixx5QdtOd0jQWLQIDAQABo0UwQzAOBgNVHQ8BAf8EBAMC
AuQwEgYDVR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQU5Olr9c4vu7OLVJrlGOtF
rdh5+qQwDQYJKoZIhvcNAQELBQADggEBALTZARd4BA0ke5O4a9G+1Om1P4L16fk9
R2uICKW1MEGg/1zDXZS/6dX+2xJrLLp3xhFcpFge78zi0MVyBfnrl0j+Uk+eSPar
iubS9S/qN7LkMKcZM8l2hZnPQ0bu6WbaKcH9Bu2KNcWdowsCLb7vgIEXkNPlxoKM
Q+lOZHorpLZgQph1Se7nnjhuXuqxzhxv5NlPVVy/ZiuoJ1FUn5nbS3vIvpGGiGsO
2bGltS2ADsfBNmCsRfgj1HutHERpUG+cvMsa9Wf9o3wuohUOzguPxxaL/Hpbxwp+
hnL13ksKb/bs45VHtYRQuZaUPoqTWvLRMIdMMxaLNMzE6Xyzc8h/dbA=
-----END CERTIFICATE-----


@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDIDCCAgigAwIBAgIQJwncfRDbHgMyuJKxK0dKCDANBgkqhkiG9w0BAQsFADAr
MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y
NTAxMjUyMDIzNTdaFw0zMDAxMzAyMDIzNTdaMCMxEjAQBgNVBAoTCUNvY2tyb2Fj
aDENMAsGA1UEAxMEcm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
ALzsZkbDiGNFg+jC16+eLzL5GvygvInkFljBgxJcrajRueq3KKfWlg1WTw6SqoiU
+c1uBiK8wiz17zkyo6K1lOabIlRutyAPZNnx7F+iBvhbMw8uzrlvWZKNCTWAJi4M
tLNDesSqmcCdEl+7ycJkGEmXyyDjGz+UtI6Bq5ax/MN9lc8CoKKAc6KzqiiYf0MR
6A2f5wwm8th8kT89HIt541LyElUr0JjttYOhrR0O82gF11Uf6OTYCxiySaHXTXpW
yYXXs6YsFaqm+Y3UZfnIk3jkwMPTYuQ3HoVe66YPB87JbPfMmiO4+NBGgqpSq2d9
n+l87zGJumwUaFQcq2s/1yUCAwEAAaNIMEYwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
JQQMMAoGCCsGAQUFBwMCMB8GA1UdIwQYMBaAFOTpa/XOL7uzi1Sa5RjrRa3Yefqk
MA0GCSqGSIb3DQEBCwUAA4IBAQAyygcCWS9hC2/HI59i5IwirXxO6NXUJLQIrooz
z187fhAdfVGioAT6K1cU+NrrJgoFc9Znle4USjAgiCttfOu8ZXXySpm8kpwzlPCa
m7tg76cpOHB9Gw1vt4DQdgjTjBDiIMjQIa8BRdIgvjC0VodFMe950cBuYpTrX27W
KdFpsqWfD423uWPyVMxO/8k1E0epuHnLxqNEX55+yPM24PxiHVxsm6YSeViIAxj0
NXNXYSAoHQKob+8NysWT4QhrezdF8Cj6zbvlIrpJdmRiwcvbvBp4bnj6wg5OYAPM
pNqjII1A52ryOn5jVEfZvBb6s18ZIm9d/xGPugVsbJhBJy6S
-----END CERTIFICATE-----


@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAvOxmRsOIY0WD6MLXr54vMvka/KC8ieQWWMGDElytqNG56rco
p9aWDVZPDpKqiJT5zW4GIrzCLPXvOTKjorWU5psiVG63IA9k2fHsX6IG+FszDy7O
uW9Zko0JNYAmLgy0s0N6xKqZwJ0SX7vJwmQYSZfLIOMbP5S0joGrlrH8w32VzwKg
ooBzorOqKJh/QxHoDZ/nDCby2HyRPz0ci3njUvISVSvQmO21g6GtHQ7zaAXXVR/o
5NgLGLJJoddNelbJhdezpiwVqqb5jdRl+ciTeOTAw9Ni5DcehV7rpg8Hzsls98ya
I7j40EaCqlKrZ32f6XzvMYm6bBRoVByraz/XJQIDAQABAoIBAAVHOYhKmDnlzEyp
fOssKTdsXEOonfvgQnuSVH4j1ro7uc0D9v/Rb/nJaoYGtPsB5oTFySgZS/eDm35m
msnF9vYGaYwgV79ujqvEJY16cmVn7uJCtYXaxY7hn9s9zFNHCZlkjj6GYatO+B9y
mK10rHUJ56PwlGdPWUgN+WRJbr1rbXJ0XhaNlR7d39XxrxFFI4MOvw2DNOvAOG6g
foIpA4ZeLhcGYIjsZxqrOZqVh1br4w5rWEvGqONi6LCrvwtMuNLAWExASkLJKIzw
vQ9jHpxYNqak0PHpsrHtUx50WsMt0ea1u/ioMKPNXs/Lkj18eGYpVI+S1wxDgKV+
m6K6uZUCgYEA9UKYCV1KiKAINTtwbTKHSa/vn/U6JKOLQUvPD2qpbVRdgS2R1mQS
soqeDW1d+Y4tRk/tnlmpolkuuNDxulr2CTm6wbgeU6TnF7pq7ClIZK3hv2VGTT3B
uXxx+cQ+zjqygAidopjLMUH/3aO7Ldw6gcuCLrjN1xEVJiD4IGTwxtsCgYEAxTJD
Fl9m5g3bCQPfpSfclI0weNPHIoVQ63IcqRHH+e0BR03YZWbq8lMl+t81q6G/rsIH
jD1Pl5RW9EhgguXOoMXeKVpT34M+gcJ0PdEI6+WZ3ZjJ0kwwPcypsA93aZmZx883
iksC2ZfIKqpCwguDKyvb5EcLNzrDSnMAl7NZOf8CgYEAoVqKg76ohnIidEMCmBSi
BMyGrYm8Eta1iuPA+beGd7MFQTMluxJjaqrfiJ3nMYNkLdnvzjnW7EQYBOcR4TRu
oWslfsUOzqCymF3AclZGllX/KtgKBE8Y4FsK8PM3Dp53SNxiONKk+2ccWkiZoHY+
1513rB1Q7qkCbO9LzqQZ8/kCgYEAgFAYPzKMrh1N7SvMFpc9fJvycmy7IsdExC9Y
XtrnGMUTE+afbDvvnQZlrDwZnDh/laNDbglnBObNPd7qjcIjFZIq4RWZhdLMlXqG
UML33ydjW0HT8TcKHOxTbfBibyA3ZEB9j0sH67ZL1Rc8oS8Ehs7fIkboEWP3NzZl
qFBXOtkCgYEAz9L2J9rpXQgwbPCOCjuPvm+zvAnGXdNgrUsVd8Tk1wczc5FyaBxw
DMgHo1BxELPETb0hNxEdQ0DdR83MXp0PZA1IG1XKcAH8CXloELwN3jpM+/6PHQRz
vdvkLPv3wM1Qdj4g6FlnPvlJHAlPytnDrUbSWxA6xMVYQJKw8na2Cm8=
-----END RSA PRIVATE KEY-----


@@ -0,0 +1,24 @@
-----BEGIN CERTIFICATE-----
MIID+jCCAuKgAwIBAgIQI/uQsaTfs97kfvVSTD400zANBgkqhkiG9w0BAQsFADAr
MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y
NTAxMjUyMDI0MTBaFw0zMDAxMzAyMDI0MTBaMCMxEjAQBgNVBAoTCUNvY2tyb2Fj
aDENMAsGA1UEAxMEbm9kZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AJ8eplN7Xp2XZYJqlp+BvOh6sN0CqVo7tCbuXSt1ZpeC0EzRTU4u1j7cGhExzYSj
VUGootjPZIjB6OQu6JHzheubWUzYMXBC72PjKYbbwoE69b98GsIP9aJ3++0j5dln
TUP/SgiVf90w3ltb6MdlWX9VMpqsmCj3b1CqNfGT+Xc/pbSCN1oT7m5XUsaGkaux
BKp9QeI6Zii8q+qyt/U1+qFCE1AVMoJe/KRM3O3j+3G+90t/IKGnJj3wtSs8+BzC
FV2ZBPJcLsmL0are9yOVU+xhc8drLdefxZQiNL8nb3MgqQ/uVSfDhraMlna+mpxo
lLDm1Zm4AKlztwwxvIV+dT8CAwEAAaOCASAwggEcMA4GA1UdDwEB/wQEAwIFoDAd
BgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHwYDVR0jBBgwFoAU5Olr9c4v
u7OLVJrlGOtFrdh5+qQwgckGA1UdEQSBwTCBvoIJbG9jYWxob3N0ghJjb2Nrcm9h
Y2hkYi1wdWJsaWOCGmNvY2tyb2FjaGRiLXB1YmxpYy5kZWZhdWx0gixjb2Nrcm9h
Y2hkYi1wdWJsaWMuZGVmYXVsdC5zdmMuY2x1c3Rlci5sb2NhbIINKi5jb2Nrcm9h
Y2hkYoIVKi5jb2Nrcm9hY2hkYi5kZWZhdWx0gicqLmNvY2tyb2FjaGRiLmRlZmF1
bHQuc3ZjLmNsdXN0ZXIubG9jYWyHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAIth
4wIOZDDcuNDtsy3dxB2q/6miFaO0p2/iUyMci3b1nwlLTliKzWGgOCwNGGR4UXOM
zVQ1bu8I2w4zY5xF047xQDQR+ek4HyOayxLlua1fVCVq4jxv23vgJA4Gv0IhUbay
TfjnDDFhijy9URzBoVAwXAx2hGu1PlFmZ1bHjre13s1mTohO3nMTA+GsMGkLk8FB
M5wWDP8UKC9zmUXPSFLEscLWzjJ015Y/tqZUMFWB4bFsGKAxdkBR2PTWbnDETfrJ
7HymCOLBFinbMs8m+NPz1j+B8MGlwi0Eu5SWxiyWkt5FtczBdMcgnuVhZBWqqxko
E13Q6CHbMt+P3Ky3FMQ=
-----END CERTIFICATE-----


@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAnx6mU3tenZdlgmqWn4G86Hqw3QKpWju0Ju5dK3Vml4LQTNFN
Ti7WPtwaETHNhKNVQaii2M9kiMHo5C7okfOF65tZTNgxcELvY+MphtvCgTr1v3wa
wg/1onf77SPl2WdNQ/9KCJV/3TDeW1vox2VZf1UymqyYKPdvUKo18ZP5dz+ltII3
WhPubldSxoaRq7EEqn1B4jpmKLyr6rK39TX6oUITUBUygl78pEzc7eP7cb73S38g
oacmPfC1Kzz4HMIVXZkE8lwuyYvRqt73I5VT7GFzx2st15/FlCI0vydvcyCpD+5V
J8OGtoyWdr6anGiUsObVmbgAqXO3DDG8hX51PwIDAQABAoIBAFvoOi3yDl58Ohew
NTwAlfq6Ezo09Vi3L4FlIM+fShitaF9WbY6BIyK/wxa3a3v3U6FPJHCSqgEL79cM
+SyEOpAx9Myb+0Jahyds6GmKubgnNBbcOiBpU3n6T7tThsmiD1D9PefjYi2CsoyW
c8foVF9l+Iq6slDHSraO+gWFcQxc/9CizRsInGqHA64anN6XvBZoVBLlu2Fowg4G
EducEOiGCekYLiOUDcLBegv57STIA/lTQ8pqFk7HcFYgg4NQhMFoS1E79zdlkZfq
j7X/DHMbt8zvRZIlWp1PrDYMysYVQVCT0PbaSd8+x9bUbDKkoMkgSj/NHsQXYn4a
muEhj+ECgYEAx8NZxZ9JU4NL5rNN2crfs/QPwxCgKp+BI41px9hqLOWKqDfMB7fI
EjOlLJveZ07sFF2Yf2gMkzCwdrmHc2g0Rj0Csqzss6Si3ppvD6EIwREnmziiJplR
mq6dQzgd5u1p9YcbIZhjzKFvRWy9JR4Kl/0A+h0zN8QupvxelRBslZkCgYEAy+ow
J9cTUqEeBL69BQU2CUTnc/MKCKGeTPRWqtKfODd7uglTaUgQ0DxDBoJxnS4ORcCN
9isT/UNJov8ufoZ1U8Kk+nBX++K5QFb46/TEomxeW+oabBg1+oLEPyqmd0H2p5er
JDsgsURUAcgKEV6ac11rzl2rwwfhgo9WVTB2+JcCgYEAwEeu32QFBpe4tWUdqGd4
kBR6H36fTKeffAMgMLaE7JY9stGSWFN0BuEjOh8GIlZ7MtcsdGZIxFz3XjASyukg
eAM915JPfFMaWj44bMjKTlwezW/j1Fd7jvJIeW1IiwE3HphfayTt2wgAvMh//3w9
IjLrf9QfeqwhY6ZDvCPFAPECgYBHUHfW9xkC5OYisrJYdyIWy8pGetEfg6ZhM3K7
+z1D4+OZhHlvcIywxuKJ/ETPu7OyIU2Esjwjbszp/GS+SzftOz2HeJLMvNYc8k3L
96ZtR4kYjB8BftYh7mnDzZ66Ro+EvT5VRXiBhmv604Lx4CwT/LAfVBMl+jOb/ZUr
5e81sQKBgEmLXN7NBs/3TXukSBwxvcixZWmgFVJIfrUhXN34p1T0BjaFKaTKREDZ
ulpnWImY9p/Q5ey1dpNlC3b9c/ZNseBXwOfmSP6TkaWpWBWNgwVOWMa6r6gPDVgZ
TlEn2zeJH+4YjrMZga0Aoeg7HcJondSV0s8jQqBhRNVZFSMjF+tA
-----END RSA PRIVATE KEY-----


@@ -0,0 +1,3 @@
sudo mkdir -p /usr/local/lib/cockroach
sudo cp -i lib/libgeos.so /usr/local/lib/cockroach/
sudo cp -i lib/libgeos_c.so /usr/local/lib/cockroach/

Binary file not shown.

Binary file not shown.


@@ -0,0 +1,289 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: cockroachdb
# namespace: cockroachdb
labels:
app: cockroachdb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cockroachdb
# namespace: cockroachdb
labels:
app: cockroachdb
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cockroachdb
# namespace: cockroachdb
labels:
app: cockroachdb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cockroachdb
subjects:
- kind: ServiceAccount
name: cockroachdb
# namespace: default
---
apiVersion: v1
kind: Service
metadata:
# This service is meant to be used by clients of the database. It exposes a ClusterIP that will
# automatically load balance connections to the different database pods.
name: cockroachdb-public
# namespace: cockroachdb
labels:
app: cockroachdb
spec:
ports:
# The main port, served by gRPC, serves Postgres-flavor SQL, internode
# traffic and the cli.
- port: 26257
targetPort: 26257
name: grpc
# The secondary port serves the UI as well as health and debug endpoints.
- port: 8080
targetPort: 8080
name: http
selector:
app: cockroachdb
---
apiVersion: v1
kind: Service
metadata:
# This service only exists to create DNS entries for each pod in the stateful
# set such that they can resolve each other's IP addresses. It does not
# create a load-balanced ClusterIP and should not be used directly by clients
# in most circumstances.
name: cockroachdb
# namespace: cockroachdb
labels:
app: cockroachdb
annotations:
# Use this annotation in addition to the actual publishNotReadyAddresses
# field below because the annotation will stop being respected soon but the
# field is broken in some versions of Kubernetes:
# https://github.com/kubernetes/kubernetes/issues/58662
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
# Enable automatic monitoring of all instances when Prometheus is running in the cluster.
prometheus.io/scrape: "true"
prometheus.io/path: "_status/vars"
prometheus.io/port: "8080"
spec:
ports:
- port: 26257
targetPort: 26257
name: grpc
- port: 8080
targetPort: 8080
name: http
# We want all pods in the StatefulSet to have their addresses published for
# the sake of the other CockroachDB pods even before they're ready, since they
# have to be able to talk to each other in order to become ready.
publishNotReadyAddresses: true
clusterIP: None
selector:
app: cockroachdb
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: cockroachdb-budget
# namespace: cockroachdb
labels:
app: cockroachdb
spec:
selector:
matchLabels:
app: cockroachdb
maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: cockroachdb
# namespace: cockroachdb
spec:
serviceName: "cockroachdb"
replicas: 3
selector:
matchLabels:
app: cockroachdb
template:
metadata:
labels:
app: cockroachdb
spec:
serviceAccountName: cockroachdb
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- cockroachdb
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
image: cockroachdb/cockroach:v24.1.2
imagePullPolicy: IfNotPresent
args: ["-- insecure"]
# TODO: Change these to appropriate values for the hardware that you're running. You can see
# the resources that can be allocated on each of your Kubernetes nodes by running:
# kubectl describe nodes
# Note that requests and limits should have identical values.
resources:
requests:
cpu: "2"
memory: "2Gi"
limits:
cpu: "2"
memory: "2Gi"
ports:
- containerPort: 26257
name: grpc
- containerPort: 8080
name: http
# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases.
# livenessProbe:
# httpGet:
# path: "/health"
# port: http
# scheme: HTTPS
# initialDelaySeconds: 30
# periodSeconds: 5
readinessProbe:
httpGet:
path: "/health?ready=1"
port: http
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 2
volumeMounts:
- name: datadir
mountPath: /cockroach/cockroach-data
- name: certs
mountPath: /cockroach/cockroach-certs
env:
- name: COCKROACH_CHANNEL
value: kubernetes-secure
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
resource: limits.cpu
divisor: "1"
- name: MEMORY_LIMIT_MIB
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: "1Mi"
command:
- "/bin/bash"
- "-ecx"
# The use of qualified `hostname -f` is crucial:
# Other nodes aren't able to look up the unqualified hostname.
- exec
/cockroach/cockroach
start
--logtostderr
--certs-dir /cockroach/cockroach-certs
--advertise-host $(hostname -f)
--http-addr 0.0.0.0
--join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb
--cache $(expr $MEMORY_LIMIT_MIB / 4)MiB
--max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB
# No pre-stop hook is required, a SIGTERM plus some time is all that's
# needed for graceful shutdown of a node.
terminationGracePeriodSeconds: 60
volumes:
- name: datadir
persistentVolumeClaim:
claimName: datadir
- name: certs
secret:
secretName: cockroachdb.node
defaultMode: 256
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: cockroach-tls
# namespace: cockroachdb
spec:
entryPoints:
- websecure
routes:
- match: HostSNI(`cockroach-prod.allarddcs.nl`)
services:
- name: cockroachdb-public
port: 8080
tls:
passthrough: true
---
# Generated file, DO NOT EDIT. Source: cloud/kubernetes/templates/bring-your-own-certs/client.yaml
# This config file demonstrates how to connect to the CockroachDB StatefulSet
# defined in bring-your-own-certs-statefulset.yaml that uses certificates
# created outside of Kubernetes. See that file for why you may want to use it.
# You should be able to adapt the core ideas to deploy your own custom
# applications and connect them to the database similarly.
#
# The pod that this file defines will sleep in the cluster not using any
# resources. After creating the pod, you can use it to open up a SQL shell to
# the database by running:
#
# kubectl exec -it cockroachdb-client-secure -- ./cockroach sql --url="postgres://root@cockroachdb-public:26257/?sslmode=verify-full&sslcert=/cockroach-certs/client.root.crt&sslkey=/cockroach-certs/client.root.key&sslrootcert=/cockroach-certs/ca.crt"
apiVersion: v1
kind: Pod
metadata:
name: cockroachdb-client-secure
# namespace: cockroachdb
labels:
app: cockroachdb-client
spec:
serviceAccountName: cockroachdb
containers:
- name: cockroachdb-client
image: cockroachdb/cockroach:v24.1.2
# Keep a pod open indefinitely so kubectl exec can be used to get a shell to it
# and run cockroach client commands, such as cockroach sql, cockroach node status, etc.
command:
- sleep
- "2147483648" # 2^31
volumeMounts:
- name: client-certs
mountPath: /cockroach-certs
volumes:
- name: client-certs
secret:
secretName: cockroachdb.client.root
defaultMode: 256

dev/cockroachdb/install.sh Executable file

@@ -0,0 +1,16 @@
#!/bin/bash
# Start from a clean slate: recreate the certificate working directories
rm -rf certs
rm -rf my-safe-directory
mkdir certs
mkdir my-safe-directory
# Create the CA and a client certificate for the root SQL user
cockroach cert create-ca --certs-dir=certs --ca-key=my-safe-directory/ca.key
cockroach cert create-client root --certs-dir=certs --ca-key=my-safe-directory/ca.key
#microk8s kubectl create ns cockroachdb
# Store the client certs as a secret, then create the node certificate for all service DNS names and store that as well
microk8s kubectl create secret generic cockroachdb.client.root --from-file=certs
cockroach cert create-node --certs-dir=certs --ca-key=my-safe-directory/ca.key localhost 127.0.0.1 cockroachdb-public cockroachdb-public.default cockroachdb-public.default.svc.cluster.local *.cockroachdb *.cockroachdb.default *.cockroachdb.default.svc.cluster.local
microk8s kubectl create secret generic cockroachdb.node --from-file=certs
# Deploy the StatefulSet and initialize the cluster from the first pod
microk8s kubectl create -f cockroachdb.yaml
microk8s kubectl get pod
microk8s kubectl exec -it cockroachdb-0 \
  -- /cockroach/cockroach init \
  --certs-dir=/cockroach/cockroach-certs

View File

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAvBJOTewyeYeWUncc7wx27bRCaDH7YawGyaltYypUzo93li+8
K5UwVSYfy3mxNp47IQXebDPCQITct5pGq/EBTrWGJ/MLf8ZcCfPvvzylsqsesFFf
S5y0sYof+JzyowDOJflWsQnJLIK5kD32fvupvc0dKY8q/4WN/Ra1kiUm6ZcFYWVK
Jx2s2ZVWcDP5xh+obCgP3F4cTsLjo1mkoRPMSLw5w9M5x3AiDgi6zwkcw9aUVq0l
BciAlI4cAHC4Awc1AP3OazYV/E+cC6dtzS+55KRGQIYOp/pkgBKsTAd2ahuZTh8Z
WXySp30X0luRUO9wBksGEt5ixx5QdtOd0jQWLQIDAQABAoIBAQCwnCQqap7vnxLb
t/1UwojAKeGehSlCjFAHefI+CFeBbhpnz8XNy5iKrXV4F3wCBU8TcLZxN524Bsxa
Iicxee23YyFrTIJE6BowQoGmPSaBBM6Z1qA9mhfZDRN+3KvBxJTR9jaho8Xl5ZCq
UnWyw1Of6Aj1qPtA3sL6oyO47OiAu3Ph2+jlXBTlpmNQlz3BjansHpV0l9IsYY0H
dhAieMY4piYzB6LIFQUBH8T7gxnToPvgulSWaKV1mG7Xw/lSoj1YpDXXWYWMfiDB
Xl55Pyrp44J8+cdATGFIgk+ln5aeDQNtVV3wLIHsSrZaZ6ojFFpBY3qj4LvYmRjS
0Sj79ErFAoGBAN/riyjNfgSRs2wqsMPcVwetKHmP7we5wA8WAWMj1glDfjhNfHo1
J6gEYASc2ai44aK5P6XIGeAt1NmAAqaeJKKk1/fMUKbgCLLeG+Ds24Q9FTIigUpW
kMctLTHJ9mkr2xSNfBUrjwvsvnZKYox6tBcYPDsnpgj/lkEJ7S32S5MjAoGBANcD
/ElaTUHFOr/q6YALQUgw97xBSff1WLa5ESByUXrirpNyKchnU6hY1Ndo9snd4QZs
RZIsPEPBbR1hN2R/gTbUn2hVGPxLZ0wUs/IbsYPXAsunRD87g2gI0W++OR3sz5j4
p/6NodgsRcOmAXG1pZwJAFAJLTqUkTF0yXg8dS5vAoGACK6MRbe59BlmGIKLOfzY
Dv8iu5veC7GjBbK3uQ1RpihMw4gVlHNtJzGMO4GNWuJYNUPzeM0KW8vLHee9spId
H4U+rmfolJ/JFo5QDGeCl1z67meyFZzHnkFdKDoJaMh/hQt7TSLUOAUk2VdG/OVh
CCgzZaPC50RpofntjUOoaHsCgYBORvoq7kAgCKCZy/jUD8TldkZKd+5o4h4472kn
ydaWCT6LGU3S0qMnL6fVADaQSUGp5/LwA0CxXhLOVl0nLjApeQDLp+dfukfR79uO
8bwPhlBTOgLjjlQJpOQybSs4FMWDKEtopcFdBMklMCNodTvkcXZ2rNCVeg7d1Wmf
Z0s16wKBgA8KPg/7fEdmXItkbcVd2tyngCOo1NNXyGmZ7SnrkoXilyiKzZwmeUZl
PN27ciS/VpKTb278tNdQudmlBs28/McKddz9SnAKvTP/WbUXAh3gpeDTX9KVD7++
Z7wCBrQcb2z5WG2ojUwbYYZGjuouYJT2WGElDoOxRT4eCSbgj4kB
-----END RSA PRIVATE KEY-----

336
dev/cockroachdb/pvc.yaml Executable file
View File

@@ -0,0 +1,336 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: cockroachdb
namespace: cockroachdb
labels:
app: cockroachdb
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cockroachdb
namespace: cockroachdb
labels:
app: cockroachdb
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cockroachdb
namespace: cockroachdb
labels:
app: cockroachdb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cockroachdb
subjects:
- kind: ServiceAccount
name: cockroachdb
  namespace: cockroachdb
---
apiVersion: v1
kind: Service
metadata:
# This service is meant to be used by clients of the database. It exposes a ClusterIP that will
# automatically load balance connections to the different database pods.
name: cockroachdb-public
namespace: cockroachdb
labels:
app: cockroachdb
spec:
ports:
# The main port, served by gRPC, serves Postgres-flavor SQL, internode
# traffic and the cli.
- port: 26257
targetPort: 26257
name: grpc
# The secondary port serves the UI as well as health and debug endpoints.
- port: 8080
targetPort: 8080
name: http
selector:
app: cockroachdb
---
apiVersion: v1
kind: Service
metadata:
# This service only exists to create DNS entries for each pod in the stateful
# set such that they can resolve each other's IP addresses. It does not
# create a load-balanced ClusterIP and should not be used directly by clients
# in most circumstances.
name: cockroachdb
namespace: cockroachdb
labels:
app: cockroachdb
annotations:
# Use this annotation in addition to the actual publishNotReadyAddresses
# field below because the annotation will stop being respected soon but the
# field is broken in some versions of Kubernetes:
# https://github.com/kubernetes/kubernetes/issues/58662
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
# Enable automatic monitoring of all instances when Prometheus is running in the cluster.
prometheus.io/scrape: "true"
prometheus.io/path: "_status/vars"
prometheus.io/port: "8080"
spec:
ports:
- port: 26257
targetPort: 26257
name: grpc
- port: 8080
targetPort: 8080
name: http
# We want all pods in the StatefulSet to have their addresses published for
# the sake of the other CockroachDB pods even before they're ready, since they
# have to be able to talk to each other in order to become ready.
publishNotReadyAddresses: true
clusterIP: None
selector:
app: cockroachdb
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: cockroachdb-budget
namespace: cockroachdb
labels:
app: cockroachdb
spec:
selector:
matchLabels:
app: cockroachdb
maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: cockroachdb
namespace: cockroachdb
spec:
serviceName: "cockroachdb"
replicas: 3
selector:
matchLabels:
app: cockroachdb
template:
metadata:
labels:
app: cockroachdb
spec:
serviceAccountName: cockroachdb
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- cockroachdb
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
image: cockroachdb/cockroach:v24.1.2
imagePullPolicy: IfNotPresent
# TODO: Change these to appropriate values for the hardware that you're running. You can see
# the resources that can be allocated on each of your Kubernetes nodes by running:
# kubectl describe nodes
# Note that requests and limits should have identical values.
resources:
requests:
cpu: "2"
memory: "2Gi"
limits:
cpu: "2"
memory: "2Gi"
ports:
- containerPort: 26257
name: grpc
- containerPort: 8080
name: http
# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases.
# livenessProbe:
# httpGet:
# path: "/health"
# port: http
# scheme: HTTPS
# initialDelaySeconds: 30
# periodSeconds: 5
readinessProbe:
httpGet:
path: "/health?ready=1"
port: http
scheme: HTTPS
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 2
volumeMounts:
- name: datadir
mountPath: /cockroach/cockroach-data
- name: certs
mountPath: /cockroach/cockroach-certs
env:
- name: COCKROACH_CHANNEL
value: kubernetes-secure
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
resource: limits.cpu
divisor: "1"
- name: MEMORY_LIMIT_MIB
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: "1Mi"
command:
- "/bin/bash"
- "-ecx"
# The use of qualified `hostname -f` is crucial:
# Other nodes aren't able to look up the unqualified hostname.
- exec
/cockroach/cockroach
start
--logtostderr
--certs-dir /cockroach/cockroach-certs
--advertise-host $(hostname -f)
--http-addr 0.0.0.0
--join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb
--cache $(expr $MEMORY_LIMIT_MIB / 4)MiB
--max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB
# No pre-stop hook is required, a SIGTERM plus some time is all that's
# needed for graceful shutdown of a node.
terminationGracePeriodSeconds: 60
volumes:
- name: datadir
persistentVolumeClaim:
claimName: datadir
- name: certs
secret:
secretName: cockroachdb.node
defaultMode: 256
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: datadir-cockroachdb-0
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
    - ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/cockroachdb/0
readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: datadir-cockroachdb-1
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/cockroachdb/1
readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: datadir-cockroachdb-2
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/cockroachdb/2
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: datadir-cockroachdb-0
namespace: cockroachdb
spec:
  storageClassName: ""
volumeName: datadir-cockroachdb-0
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: datadir-cockroachdb-1
namespace: cockroachdb
spec:
  storageClassName: ""
volumeName: datadir-cockroachdb-1
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: datadir-cockroachdb-2
namespace: cockroachdb
spec:
  storageClassName: ""
volumeName: datadir-cockroachdb-2
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,41 @@
#!/bin/bash
launcherJar=( server/plugins/org.jkiss.dbeaver.launcher*.jar )
echo "Starting CloudBeaver Enterprise Server"
[ ! -d "workspace/.metadata" ] && mkdir -p workspace/.metadata \
&& mkdir -p workspace/GlobalConfiguration/.dbeaver \
&& [ ! -f "workspace/GlobalConfiguration/.dbeaver/data-sources.json" ] \
&& cp conf/initial-data-sources.conf workspace/GlobalConfiguration/.dbeaver/data-sources.json
exec java ${JAVA_OPTS} \
-Dfile.encoding=UTF-8 \
--add-modules=ALL-SYSTEM \
--add-opens=java.base/java.io=ALL-UNNAMED \
--add-opens=java.base/java.lang=ALL-UNNAMED \
--add-opens=java.base/java.lang.reflect=ALL-UNNAMED \
--add-opens=java.base/java.net=ALL-UNNAMED \
--add-opens=java.base/java.nio=ALL-UNNAMED \
--add-opens=java.base/java.nio.charset=ALL-UNNAMED \
--add-opens=java.base/java.text=ALL-UNNAMED \
--add-opens=java.base/java.time=ALL-UNNAMED \
--add-opens=java.base/java.util=ALL-UNNAMED \
--add-opens=java.base/java.util.concurrent=ALL-UNNAMED \
--add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED \
--add-opens=java.base/jdk.internal.vm=ALL-UNNAMED \
--add-opens=java.base/jdk.internal.misc=ALL-UNNAMED \
--add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
--add-opens=java.base/sun.security.action=ALL-UNNAMED \
--add-opens=java.base/sun.security.util=ALL-UNNAMED \
--add-opens=java.security.jgss/sun.security.jgss=ALL-UNNAMED \
--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED \
--add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED \
--add-opens=java.sql/java.sql=ALL-UNNAMED \
-jar ${launcherJar} \
-product io.cloudbeaver.product.ee.product \
-data ${workspacePath} \
-web-config conf/cloudbeaver.conf \
-nl en \
-registryMultiLanguage

10
dev/cockroachdb/uninstall.sh Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/bash
microk8s kubectl delete -f cockroachdb.yaml
microk8s kubectl delete pvc datadir-cockroachdb-0 -n cockroachdb
microk8s kubectl delete pvc datadir-cockroachdb-1 -n cockroachdb
microk8s kubectl delete pvc datadir-cockroachdb-2 -n cockroachdb
microk8s kubectl delete secret cockroachdb.node -n cockroachdb
microk8s kubectl delete secret cockroachdb.client.root -n cockroachdb
microk8s kubectl delete ns cockroachdb
rm -rf certs
rm -rf my-safe-directory

40
dev/cosign/README.md Executable file
View File

@@ -0,0 +1,40 @@
#signing image with sbom
#generate sbom in spdx-format
syft quay.alldcs.nl/allard/olproperties:master -o spdx > olproperties.spdx
#attach the sbom to the image:
cosign attach sbom --sbom olproperties.spdx quay.alldcs.nl/allard/olproperties:master
WARNING: Attaching SBOMs this way does not sign them. If you want to sign them, use 'cosign attest --predicate olproperties.spdx --key <key path>' or 'cosign sign --key <key path> --attachment sbom <image uri>'
Uploading SBOM file for [quay.alldcs.nl/allard/olproperties:master] to [quay.alldcs.nl/allard/olproperties:sha256-4d79a08eb15ea8c9730e77fc54bea37299b4ed21d8b875d95fd54cd78e3556c9.sbom] with mediaType [text/spdx].
#sign the sbom:
cosign sign --key cosign.key quay.alldcs.nl/allard/olproperties:sha256-4d79a08eb15ea8c9730e77fc54bea37299b4ed21d8b875d95fd54cd78e3556c9.sbom
- output:
Enter password for private key:
WARNING: Image reference quay.alldcs.nl/allard/olproperties:sha256-4d79a08eb15ea8c9730e77fc54bea37299b4ed21d8b875d95fd54cd78e3556c9.sbom uses a tag, not a digest, to identify the image to sign.
This can lead you to sign a different image than the intended one. Please use a
digest (example.com/ubuntu@sha256:abc123...) rather than tag
(example.com/ubuntu:latest) for the input to cosign. The ability to refer to
images by tag will be removed in a future release.
The sigstore service, hosted by sigstore a Series of LF Projects, LLC, is provided pursuant to the Hosted Project Tools Terms of Use, available at https://lfprojects.org/policies/hosted-project-tools-terms-of-use/.
Note that if your submission includes personal data associated with this signed artifact, it will be part of an immutable record.
This may include the email address associated with the account with which you authenticate your contractual Agreement.
This information will be used for signing this artifact and will be stored in public transparency logs and cannot be removed later, and is subject to the Immutable Record notice at https://lfprojects.org/policies/hosted-project-tools-immutable-records/.
By typing 'y', you attest that (1) you are not submitting the personal data of any other person; and (2) you understand and agree to the statement and the Agreement terms at the URLs listed above.
Are you sure you would like to continue? [y/N] y
tlog entry created with index: 41682114
Pushing signature to: quay.alldcs.nl/allard/olproperties
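#verify the signature (optional):
A minimal check, assuming the matching public key cosign.pub from this directory and the image reference used above:
cosign verify --key cosign.pub quay.alldcs.nl/allard/olproperties:master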
#attest

View File

@@ -0,0 +1,10 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: dev-cosign
title: Cosign (dev)
spec:
type: service
lifecycle: production
owner: allarddcs
subcomponentOf: component:default/DEV-cluster

11
dev/cosign/cosign.key Executable file
View File

@@ -0,0 +1,11 @@
-----BEGIN ENCRYPTED SIGSTORE PRIVATE KEY-----
eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjozMjc2OCwiciI6
OCwicCI6MX0sInNhbHQiOiJxL1Fzdkk2di9JQlFjN096Z1N2aFhtNllYbGpHemVv
OFhDS2lRUE1jK0RvPSJ9LCJjaXBoZXIiOnsibmFtZSI6Im5hY2wvc2VjcmV0Ym94
Iiwibm9uY2UiOiJ1T2h2c1AyMkh1d2M5RGF3OTZRNkVPcFNTTHhmbG5BKyJ9LCJj
aXBoZXJ0ZXh0IjoicHcxdm5BSENQUmgrZmMrM0t6UjVQTzdUU1hjcGRsMkEvdmhW
T3JHS2IzRWxtWGlNS2l3Wlo5M2pFT1MvdjZic3hjWXlOL3NKcmY0Ulc0TVQreDNw
SXJWd1duTlJCUWhmZ0VLb0xLZXhKNktOcnhTa1R0OE8zT25nZE1XNlBzSVZueldl
dTdZUWQrRW9KQnRxalVqb1dXYTBtTjcyNVZKVTFUNkNWNlh1K1UxVHNtYndKOWtB
TUpYVkttNmJyQys4MFJDL3dCS0x2dnZmTXc9PSJ9
-----END ENCRYPTED SIGSTORE PRIVATE KEY-----

4
dev/cosign/cosign.pub Executable file
View File

@@ -0,0 +1,4 @@
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEhvRXr/p/gE2ZVuf/aq+RktGqLWyR
fVHwC7ROAnfKL5zcsO3Deoao5nBXESQ9/6P/YB9Zjrw82ST2N4+e6bzFkA==
-----END PUBLIC KEY-----

85579
dev/cosign/olproperties.spdx Executable file

File diff suppressed because it is too large Load Diff

1
dev/crate/alter_table Executable file
View File

@@ -0,0 +1 @@
ALTER TABLE iss SET ("blocks.read_only_allow_delete" = FALSE)

View File

@@ -0,0 +1,10 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: dev-crate
title: Crate (dev)
spec:
type: service
lifecycle: production
owner: allarddcs
subcomponentOf: component:default/DEV-cluster

97
dev/crate/controler.yaml Executable file
View File

@@ -0,0 +1,97 @@
kind: StatefulSet
apiVersion: "apps/v1"
metadata:
# This is the name used as a prefix for all pods in the set.
name: crate
spec:
serviceName: "crate-set"
# Our cluster has three nodes.
replicas: 3
selector:
matchLabels:
# The pods in this cluster have the `app:crate` app label.
app: crate
template:
metadata:
labels:
app: crate
spec:
# InitContainers run before the main containers of a pod are
# started, and they must terminate before the primary containers
# are initialized. Here, we use one to set the correct memory
# map limit.
initContainers:
- name: init-sysctl
image: busybox
imagePullPolicy: IfNotPresent
command: ["sysctl", "-w", "vm.max_map_count=262144"]
securityContext:
privileged: true
# This final section is the core of the StatefulSet configuration.
# It defines the container to run in each pod.
containers:
- name: crate
# Use the CrateDB 5.1.1 Docker image.
image: crate:5.1.1
# Pass in configuration to CrateDB via command-line options.
# We are setting the node names explicitly, which is
# needed to determine the initial master nodes. These are set to
# the name of the pod.
# We are using the SRV records provided by Kubernetes to discover
# nodes within the cluster.
args:
- -Cnode.name=${POD_NAME}
- -Ccluster.name=${CLUSTER_NAME}
- -Ccluster.initial_master_nodes=crate-0,crate-1,crate-2
- -Cdiscovery.seed_providers=srv
- -Cdiscovery.srv.query=_crate-internal._tcp.crate-internal-service.${NAMESPACE}.svc.cluster.local
- -Cgateway.recover_after_data_nodes=2
- -Cgateway.expected_data_nodes=${EXPECTED_NODES}
- -Cpath.data=/data
volumeMounts:
# Mount the `/data` directory as a volume named `data`.
- mountPath: /data
name: data
resources:
limits:
# How much memory each pod gets.
memory: 512Mi
ports:
# Port 4300 for inter-node communication.
- containerPort: 4300
name: crate-internal
# Port 4200 for HTTP clients.
- containerPort: 4200
name: crate-web
# Port 5432 for PostgreSQL wire protocol clients.
- containerPort: 5432
name: postgres
# Environment variables passed through to the container.
env:
# This variable is detected by CrateDB.
- name: CRATE_HEAP_SIZE
value: "256m"
# The rest of these variables are used in the command-line
# options.
- name: EXPECTED_NODES
value: "3"
- name: CLUSTER_NAME
value: "my-crate"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeClaimTemplates:
# Use persistent storage.
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

98
dev/crate/crate-storage.yaml Executable file
View File

@@ -0,0 +1,98 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: crate-pv-0
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.40.100
path: /mnt/nfs_share/crate/crate-0
readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: crate-pv-1
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.40.100
path: /mnt/nfs_share/crate/crate-1
readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: crate-pv-2
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.40.100
path: /mnt/nfs_share/crate/crate-2
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-crate-0
spec:
storageClassName: ""
volumeName: crate-pv-0
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-crate-1
spec:
storageClassName: ""
volumeName: crate-pv-1
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-crate-2
spec:
storageClassName: ""
volumeName: crate-pv-2
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

4
dev/crate/create-table Executable file
View File

@@ -0,0 +1,4 @@
CREATE TABLE iss (
timestamp TIMESTAMP GENERATED ALWAYS AS CURRENT_TIMESTAMP,
position GEO_POINT
);

19
dev/crate/external-service.yaml Executable file
View File

@@ -0,0 +1,19 @@
kind: Service
apiVersion: v1
metadata:
name: crate-external-service
labels:
app: crate
spec:
# Create an externally reachable load balancer.
type: LoadBalancer
ports:
# Port 4200 for HTTP clients.
- port: 4200
name: crate-web
# Port 5432 for PostgreSQL wire protocol clients.
- port: 5432
name: postgres
selector:
# Apply this to all nodes with the `app:crate` label.
app: crate

15
dev/crate/ingressroute-tls.yaml Executable file
View File

@@ -0,0 +1,15 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: cratedb-tls
spec:
entryPoints:
- websecure
routes:
- match: Host(`cratedb.alldcs.nl`)
kind: Rule
services:
- name: crate-ui
port: 4200
tls:
certResolver: letsencrypt

36
dev/crate/internal-service.yaml Executable file
View File

@@ -0,0 +1,36 @@
kind: Service
apiVersion: v1
metadata:
name: crate-internal-service
labels:
app: crate
spec:
# A static IP address is assigned to this service. This IP address is
# only reachable from within the Kubernetes cluster.
type: ClusterIP
ports:
# Port 4300 for inter-node communication.
- port: 4300
name: crate-internal
selector:
# Apply this to all nodes with the `app:crate` label.
app: crate
---
kind: Service
apiVersion: v1
metadata:
name: crate-ui
labels:
app: crate
spec:
# A static IP address is assigned to this service. This IP address is
# only reachable from within the Kubernetes cluster.
type: ClusterIP
ports:
# Port 4200 for HTTP clients.
- port: 4200
name: crate-web
selector:
# Apply this to all nodes with the `app:crate` label.
app: crate

20
dev/crate/iss.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Exit immediately if a pipeline returns a non-zero status
set -e
position () {
curl -s http://api.open-notify.org/iss-now.json |
jq -r '[.iss_position.longitude, .iss_position.latitude] | @tsv';
}
wkt_position () {
echo "POINT ($(position | expand -t 1))";
}
while true; do
crash --hosts 192.168.40.81:4200 \
--command "INSERT INTO iss (position) VALUES ('$(wkt_position)')"
echo 'Sleeping for 10 seconds...'
sleep 10
done

1
dev/crate/select Executable file
View File

@@ -0,0 +1 @@
SELECT "timestamp", "position" FROM "doc"."iss" LIMIT 1000;

View File

@@ -0,0 +1,16 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: dev-defectdojo
title: Defectdojo (dev)
annotations:
backstage.io/kubernetes-label-selector: "app=defectdojo"
links:
- url: https://github.com/AllardKrings/kubernetes/dev/defectdojo
docs:
- url: ./README.md
spec:
type: service
lifecycle: production
owner: allarddcs
subcomponentOf: component:default/DEV-cluster

42
dev/defectdojo/helm/README.md Executable file
View File

@@ -0,0 +1,42 @@
#Installation
https://epam.github.io/edp-install/operator-guide/install-defectdojo/
kubectl create namespace defectdojo
helm repo add defectdojo 'https://raw.githubusercontent.com/DefectDojo/django-DefectDojo/helm-charts'
helm repo update
Create PostgreSQL admin secret:
kubectl -n defectdojo create secret generic defectdojo-postgresql-specific \
--from-literal=postgresql-password=defectdojodefect \
--from-literal=postgresql-postgres-password=defectdojodefect
Create Rabbitmq admin secret:
kubectl -n defectdojo create secret generic defectdojo-rabbitmq-specific \
--from-literal=rabbitmq-password=defectdojo \
--from-literal=rabbitmq-erlang-cookie=defectdojodefectdojodefectdojojo
Create DefectDojo admin secret:
kubectl -n defectdojo create secret generic defectdojo \
--from-literal=DD_ADMIN_PASSWORD=defectdojodefectdojojo \
--from-literal=DD_SECRET_KEY=defectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefecdojojo \
--from-literal=DD_CREDENTIAL_AES_256_KEY=defectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefecdojojo \
--from-literal=METRICS_HTTP_AUTH_PASSWORD=defectdojodefectdojodefectdojojo
Install DefectDojo v.2.22.4 using defectdojo/defectdojo Helm chart v.1.6.69:
helm upgrade --install \
defectdojo \
--version 1.6.69 \
defectdojo/defectdojo \
--namespace defectdojo \
--values values.yaml
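#check the rollout (optional):
A quick sanity check after the install; resource names come from the chart defaults, so adjust if you override them:
kubectl -n defectdojo get pods
helm -n defectdojo status defectdojo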

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,5 @@
microk8s kubectl -n defectdojo create secret generic defectdojo \
--from-literal=DD_ADMIN_PASSWORD=defectdojodefectdojojo \
--from-literal=DD_SECRET_KEY=defectdodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojo \
--from-literal=DD_CREDENTIAL_AES_256_KEY=defectdodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojodefectdojo \
--from-literal=METRICS_HTTP_AUTH_PASSWORD=defectdojodefectdojodefectdojojo -n defectdojo

View File

@@ -0,0 +1,14 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: defectdojo-http
namespace: defectdojo
spec:
entryPoints:
- web
routes:
- match: Host(`defectdojo-dev.allarddcs.nl`)
kind: Rule
services:
- name: defectdojo-django
port: 80

View File

@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: defectdojo-tls
namespace: defectdojo
spec:
entryPoints:
- websecure
routes:
- match: Host(`defectdojo-dev.allarddcs.nl`)
kind: Rule
services:
- name: defectdojo-django
port: 80
tls:
certResolver: letsencrypt

View File

@@ -0,0 +1,68 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-postgres-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/postgres
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-defectdojo-postgresql-0
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-postgres-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-rabbitmq-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/rabbitmq
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-defectdojo-rabbitmq-0
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-rabbitmq-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,3 @@
microk8s kubectl -n defectdojo create secret generic defectdojo-postgresql-specific \
--from-literal=postgresql-password=defectdojo \
--from-literal=postgresql-postgres-password=defectdojo -n defectdojo

View File

@@ -0,0 +1,3 @@
microk8s kubectl -n defectdojo create secret generic defectdojo-rabbitmq-specific \
--from-literal=rabbitmq-password=mqrabbitmq \
--from-literal=rabbitmq-erlang-cookie=rabbitmqrabbitmqrabbitmqrabbitmq -n defectdojo

View File

@@ -0,0 +1,552 @@
---
# Global settings
# create defectdojo specific secret
createSecret: false
# create rabbitmq secret in defectdojo chart, outside of rabbitmq chart
createRabbitMqSecret: false
# create redis secret in defectdojo chart, outside of redis chart
createRedisSecret: false
# create mysql secret in defectdojo chart, outside of mysql chart
createMysqlSecret: false
# create postgresql secret in defectdojo chart, outside of postgresql chart
createPostgresqlSecret: false
# create postgresql-ha secret in defectdojo chart, outside of postgresql-ha chart
createPostgresqlHaSecret: false
# create postgresql-ha-pgpool secret in defectdojo chart, outside of postgresql-ha chart
createPostgresqlHaPgpoolSecret: false
# Track configuration (trackConfig): will automatically respin application pods in case of config changes detection
# can be:
# - disabled, default
# - enabled, enables tracking configuration changes based on SHA256
# trackConfig: disabled
# Enables application network policy
# For more info follow https://kubernetes.io/docs/concepts/services-networking/network-policies/
networkPolicy:
enabled: false
# if additional labels need to be allowed (e.g. prometheus scraper)
ingressExtend: []
# ingressExtend:
# - podSelector:
# matchLabels:
# app.kubernetes.io/instance: defectdojo-prometheus
egress: []
# egress:
# - to:
# - ipBlock:
# cidr: 10.0.0.0/24
# ports:
# - protocol: TCP
# port: 443
# Configuration value to select database type
# Option to use the "postgresql" or "mysql" database type; by default "mysql" is chosen
# Set the "enabled" field to true for the database type you select (if you want to use the internal database) and to false for the one you don't
database: postgresql
# Primary hostname of instance
host: defectdojo.default.minikube.local
# The full URL to your defectdojo instance, depends on the domain where DD is deployed, it also affects links in Jira
# site_url: 'https://<yourdomain>'
# optional list of alternative hostnames to use that gets appended to
# DD_ALLOWED_HOSTS. This is necessary when your local hostname does not match
# the global hostname.
# alternativeHosts:
# - defectdojo.example.com
imagePullPolicy: Always
# Where to pull the defectDojo images from. Defaults to "defectdojo/*" repositories on hub.docker.com
repositoryPrefix: defectdojo
# When using a private registry, name of the secret that holds the registry secret (eg deploy token from gitlab-ci project)
# Create secrets as: kubectl create secret docker-registry defectdojoregistrykey --docker-username=registry_username --docker-password=registry_password --docker-server='https://index.docker.io/v1/'
# imagePullSecrets: defectdojoregistrykey
tag: latest
# Additional labels to add to the pods:
# podLabels:
# key: value
podLabels: {}
# Allow overriding of revisionHistoryLimit across all deployments.
# revisionHistoryLimit: 10
securityContext:
enabled: true
djangoSecurityContext:
# django dockerfile sets USER=1001
runAsUser: 1001
nginxSecurityContext:
# nginx dockerfile sets USER=1001
runAsUser: 1001
tests:
unitTests:
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
admin:
user: admin
password:
firstName: Administrator
lastName: User
mail: admin@defectdojo.local
secretKey:
credentialAes256Key:
metricsHttpAuthPassword:
monitoring:
enabled: false
# Add the nginx prometheus exporter sidecar
prometheus:
enabled: false
image: nginx/nginx-prometheus-exporter:0.11.0
imagePullPolicy: IfNotPresent
annotations: {}
# Components
celery:
broker: rabbitmq
# To use an external celery broker, set the hostname here
brokerHost: ""
logLevel: INFO
beat:
annotations: {}
affinity: {}
nodeSelector: {}
replicas: 1
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 256Mi
tolerations: []
worker:
annotations: {}
affinity: {}
logLevel: INFO
nodeSelector: {}
replicas: 1
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 512Mi
tolerations: []
app_settings:
pool_type: solo
# Performance improved celery worker config when needing to deal with a lot of findings (e.g deduplication ops)
# Comment out the "solo" line, and uncomment the following lines.
# pool_type: prefork
# autoscale_min: 2
# autoscale_max: 8
# concurrency: 8
# prefetch_multiplier: 128
# A list of extra volumes to mount. This
# is useful for bringing in extra data that can be referenced by other configurations
# at a well known path, such as local_settings. The
# value of this should be a list of objects.
#
# Example:
#
# ```yaml
# extraVolumes:
# - type: configMap
# name: local_settings
# path: /app/dojo/settings/local_settings.py
# subPath: local_settings.py
# - type: hostPath
# name: host_directory
# path: /tmp
# hostPath: /tmp
# ```
#
# Each object supports the following keys:
#
# - `type` - Type of the volume, must be one of "configMap", "secret", "hostPath". Case sensitive.
# Even though it is supported, we highly recommend avoiding hostPath for security reasons (usually blocked by PSP)
# - `name` - Name of the configMap or secret to be mounted. This also controls
# the path that it is mounted to. The volume will be mounted to `/consul/userconfig/<name>`.
# - `path` - defines where file should be exposed
# - `subPath` - extracts only particular file from secret or configMap
# - `pathType` - only for hostPath, can be one of the "DirectoryOrCreate", "Directory" (default), "FileOrCreate",
# "File", "Socket", "CharDevice", "BlockDevice"
# - `hostPath` - only for hostPath, file or directory from local host
# @type: array<map>
extraVolumes: []
django:
annotations: {}
service:
annotations: {}
affinity: {}
ingress:
enabled: true
ingressClassName: ""
activateTLS: true
secretName: defectdojo-tls
annotations: {}
# Restricts the type of ingress controller that can interact with our chart (nginx, traefik, ...)
# kubernetes.io/ingress.class: nginx
# Depending on the size and complexity of your scans, you might want to increase the default ingress timeouts if you see repeated 504 Gateway Timeouts
# nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"
# nginx.ingress.kubernetes.io/proxy-send-timeout: "1800"
nginx:
tls:
enabled: false
generateCertificate: false
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 256Mi
nodeSelector: {}
replicas: 1
tolerations: []
uwsgi:
livenessProbe:
# Enable liveness checks on the uwsgi container. These values are used for the nginx readiness checks as well.
enabled: true
failureThreshold: 6
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 2000m
memory: 512Mi
app_settings:
processes: 2
threads: 2
enable_debug: false # this also requires DD_DEBUG to be set to True
certificates:
# includes an additional CA certificate as a volume; it references the REQUESTS_CA_BUNDLE env variable
# to create configMap `kubectl create cm defectdojo-ca-certs --from-file=ca.crt`
# NOTE: it reflects REQUESTS_CA_BUNDLE for celery workers, beats as well
enabled: false
configName: defectdojo-ca-certs
certMountPath: /certs/
certFileName: ca.crt
# A list of extra volumes to mount. This
# is useful for bringing in extra data that can be referenced by other configurations
# at a well known path, such as local_settings. The
# value of this should be a list of objects.
#
# Example:
#
# ```yaml
# extraVolumes:
# - type: configMap
# name: local_settings
# path: /app/dojo/settings/local_settings.py
# container: uwsgi
# subPath: local_settings.py
# - type: hostPath
# name: host_directory
# path: /app/dojo/settings/
# hostPath: /var/run
# container: uwsgi
# ```
#
# Each object supports the following keys:
#
# - `type` - Type of the volume, must be one of "configMap", "secret", "hostPath". Case sensitive.
# Even though it is supported, we highly recommend avoiding hostPath for security reasons (usually blocked by PSP)
# - `name` - Name of the configMap or secret to be mounted. This also controls
# the path that it is mounted to. The volume will be mounted to `/consul/userconfig/<name>`.
# - `path` - defines where file should be exposed
# - `container` - defines where volume needs to be mounted, must be uwsgi or nginx
# - `subPath` - extracts only particular file from secret or configMap
# - `pathType` - only for hostPath, can be one of the "DirectoryOrCreate", "Directory" (default), "FileOrCreate",
# "File", "Socket", "CharDevice", "BlockDevice"
# - `hostPath` - only for hostPath, file or directory from local host
# @type: array<map>
extraVolumes: []
# This feature needs more preparation before can be enabled, please visit KUBERNETES.md#media-persistent-volume
mediaPersistentVolume:
enabled: true
fsGroup: 1001
# any name
name: media
# could be emptyDir (not for production) or pvc
type: emptyDir
# in case if pvc specified, should point to the already existing pvc
persistentVolumeClaim:
# set to true to create a new pvc and if django.mediaPersistentVolume.type is set to pvc
create: false
name:
size: 5Gi
accessModes:
- ReadWriteMany # check KUBERNETES.md doc first for option to choose
storageClassName:
initializer:
run: true
jobAnnotations: {
helm.sh/hook: "post-install,post-upgrade"
}
annotations: {}
keepSeconds: 60
affinity: {}
nodeSelector: {}
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 2000m
memory: 512Mi
# A list of extra volumes to mount. This
# is useful for bringing in extra data that can be referenced by other configurations
# at a well known path, such as local_settings. The
# value of this should be a list of objects.
#
# Example:
#
# ```yaml
# extraVolumes:
# - type: configMap
# name: local_settings
# path: /app/dojo/settings/local_settings.py
# subPath: local_settings.py
# - type: hostPath
# name: host_directory
# path: /tmp
# hostPath: /tmp
# ```
#
# Each object supports the following keys:
#
# - `type` - Type of the volume, must be one of "configMap", "secret", "hostPath". Case sensitive.
# Even though it is supported, we highly recommend avoiding hostPath for security reasons (usually blocked by PSP)
# - `name` - Name of the configMap or secret to be mounted. This also controls
# the path that it is mounted to. The volume will be mounted to `/consul/userconfig/<name>`.
# - `path` - defines where file should be exposed
# - `subPath` - extracts only particular file from secret or configMap
# - `pathType` - only for hostPath, can be one of the "DirectoryOrCreate", "Directory" (default), "FileOrCreate",
# "File", "Socket", "CharDevice", "BlockDevice"
# - `hostPath` - only for hostPath, file or directory from local host
# @type: array<map>
extraVolumes: []
mysql:
enabled: false
auth:
username: defectdojo
password: ""
rootPassword: ""
database: defectdojo
existingSecret: defectdojo-mysql-specific
secretKey: mysql-password
primary:
service:
ports:
mysql: 3306
# To use an external mySQL instance, set enabled to false and uncomment
# the line below / add external address:
# mysqlServer: "127.0.0.1"
postgresql:
# enabled: true
enabled: false
image:
tag: 11.16.0-debian-11-r9
auth:
username: defectdojo
password: ""
database: defectdojo
existingSecret: defectdojo-postgresql-specific
secretKeys:
adminPasswordKey: postgresql-postgres-password
userPasswordKey: postgresql-password
replicationPasswordKey: postgresql-replication-password
architecture: standalone
primary:
name: primary
persistence:
enabled: true
service:
ports:
postgresql: 5432
podSecurityContext:
# Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC
enabled: true
# fsGroup specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully.
fsGroup: 1001
containerSecurityContext:
# Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC
enabled: true
# runAsUser specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully.
runAsUser: 1001
affinity: {}
nodeSelector: {}
volumePermissions:
enabled: false
# if using restricted SCC set runAsUser: "auto" and if running under anyuid SCC - runAsUser needs to match the line above
containerSecurityContext:
runAsUser: 1001
shmVolume:
chmod:
enabled: false
# To use an external PostgreSQL instance, set enabled to false and uncomment
# the line below:
# postgresServer: "127.0.0.1"
postgresqlha:
enabled: false
global:
pgpool:
existingSecret: defectdojo-postgresql-ha-pgpool
serviceAccount:
create: true
postgresql:
replicaCount: 3
username: defectdojo
password: ""
repmgrPassword: ""
database: defectdojo
existingSecret: defectdojo-postgresql-ha-specific
securityContext:
enabled: true
fsGroup: 1001
containerSecurityContext:
enabled: true
runAsUser: 1001
pgpool:
replicaCount: 3
adminPassword: ""
securityContext:
enabled: true
fsGroup: 1001
volumePermissions:
enabled: true
securityContext:
runAsUser: 1001
persistence:
enabled: true
service:
ports:
postgresql: 5432
# Google CloudSQL support in GKE via gce-proxy
cloudsql:
# To use CloudSQL in GKE set 'enable: true'
enabled: false
# By default, the proxy has verbose logging. Set this to false to make it less verbose
verbose: true
image:
# set repo and image tag of gce-proxy
repository: gcr.io/cloudsql-docker/gce-proxy
tag: 1.33.14
pullPolicy: IfNotPresent
# set CloudSQL instance: 'project:zone:instancename'
instance: ""
# use IAM database authentication
enable_iam_login: false
# whether to use a private IP to connect to the database
use_private_ip: false
# Settings to make running the chart on GKE simpler
gke:
# Set to true to configure the Ingress to use the GKE provided ingress controller
useGKEIngress: false
# Set to true to have GKE automatically provision a TLS certificate for the host specified
# Requires useGKEIngress to be set to true
# When using this option, be sure to set django.ingress.activateTLS to false
useManagedCertificate: false
# Workload Identity allows the K8s service account to assume the IAM access of a GCP service account to interact with other GCP services
workloadIdentityEmail: ""
rabbitmq:
enabled: true
replicaCount: 1
auth:
password: ""
erlangCookie: ""
existingPasswordSecret: defectdojo-rabbitmq-specific
secretPasswordKey: ""
existingErlangSecret: defectdojo-rabbitmq-specific
memoryHighWatermark:
enabled: true
type: relative
value: 0.5
affinity: {}
nodeSelector: {}
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
podSecurityContext:
enabled: true
fsGroup: 1001
containerSecurityContext:
enabled: true
runAsUser: 1001
runAsNonRoot: true
# For more advance options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/master/bitnami/redis
redis:
enabled: false
scheme: "redis"
transportEncryption:
enabled: false
params: ''
auth:
existingSecret: defectdojo-redis-specific
existingSecretPasswordKey: redis-password
password: ""
architecture: standalone
# To use an external Redis instance, set enabled to false and uncomment
# the line below:
# redisServer: myrediscluster
# To use a different port for Redis (default: 6379) add a port number and uncomment the lines below:
# master:
# service:
# ports:
# redis: xxxx
# To add extra variables not predefined by helm config it is possible to define in extraConfigs block, e.g. below:
# NOTE Do not store any kind of sensitive information inside of it
# extraConfigs:
# DD_SOCIAL_AUTH_AUTH0_OAUTH2_ENABLED: 'true'
# DD_SOCIAL_AUTH_AUTH0_KEY: 'dev'
# DD_SOCIAL_AUTH_AUTH0_DOMAIN: 'xxxxx'
# Extra secrets can be created inside of extraSecrets block:
# NOTE This is just an example; do not store sensitive data in plain text form, better to inject it during the deployment/upgrade via --set extraSecrets.secret=someSecret
# extraSecrets:
# DD_SOCIAL_AUTH_AUTH0_SECRET: 'xxx'
extraConfigs: {}
# To add (or override) extra variables which need to be pulled from another configMap, you can
# use extraEnv. For example:
# extraEnv:
# - name: DD_DATABASE_HOST
# valueFrom:
# configMapKeyRef:
# name: my-other-postgres-configmap
# key: cluster_endpoint

36
dev/defectdojo/helm/values.yaml Executable file
View File

@@ -0,0 +1,36 @@
tag: 2.22.4
fullnameOverride: defectdojo
host: defectdojo.alldcs.nl
site_url: https://defectdojo.alldcs.nl
alternativeHosts:
- defectdojo-django.defectdojo
celery:
beat:
nodeSelector:
kubernetes.io/arch: amd64
worker:
nodeSelector:
kubernetes.io/arch: amd64
initializer:
# should be false after initial installation was performed
run: true
nodeSelector:
kubernetes.io/arch: amd64
django:
ingress:
enabled: true # change to 'false' for OpenShift
activateTLS: false
uwsgi:
livenessProbe:
# Enable liveness checks on the uwsgi container. These values are used for the nginx readiness checks as well.
# default value is 120, so in our case 20 is just fine
initialDelaySeconds: 20
nodeSelector:
kubernetes.io/arch: amd64
rabbitmq:
nodeSelector:
kubernetes.io/arch: amd64
postgresql:
primary:
nodeSelector:
kubernetes.io/arch: amd64

View File

View File

@@ -0,0 +1,16 @@
user: admin
password: Defectdojo01@
======
migration
- log in to the uwsgi container and then run:
- python manage.py migrate
add an environment:
https://defectdojo-dev.allarddcs.nl/admin/dojo/development_environment/
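A minimal sketch of the migration step above (assuming the uwsgi Deployment from the kubectl manifests in this directory):
kubectl -n defectdojo exec -it deploy/uwsgi -- python manage.py migrate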

View File

@@ -0,0 +1 @@
Hh7ViTz1cVj8PV4faGVO9A

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,473 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: nginx
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: nginx
spec:
containers:
- env:
- name: NGINX_METRICS_ENABLED
value: "false"
- name: DD_UWSGI_HOST
value: "uwsgi.defectdojo"
- name: HTTP_AUTH_PASSWORD
value: "Defectdojo01@"
image: allardkrings/defectdojo-nginx:1.0
imagePullPolicy: IfNotPresent
name: nginx
ports:
- containerPort: 8080
- containerPort: 8443
resources: {}
volumeMounts:
- mountPath: /usr/share/nginx/html/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-media-pvc
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-media-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-media-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/media
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: nginx
name: nginx
namespace: defectdojo
spec:
ports:
- name: "8080"
port: 8080
targetPort: 8080
- name: "8443"
port: 8443
targetPort: 8443
selector:
io.kompose.service: nginx
status:
loadBalancer: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: uwsgi
name: uwsgi
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: uwsgi
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: uwsgi
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-uwsgi.sh
env:
- name: DD_ALLOWED_HOSTS
value: '*'
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_DEBUG
value: "False"
- name: DD_DJANGO_METRICS_ENABLED
value: "False"
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
image: allardkrings/defectdojo-django:1.0
imagePullPolicy: IfNotPresent
name: uwsgi
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: uwsgi-claim0
- mountPath: /app/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: uwsgi-claim0
persistentVolumeClaim:
claimName: uwsgi-claim0
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: uwsgi-claim0
name: uwsgi-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: uwsgi
name: uwsgi
namespace: defectdojo
spec:
ports:
- name: "3031"
port: 3031
targetPort: 3031
selector:
io.kompose.service: uwsgi
status:
loadBalancer: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: initializer
name: initializer
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: initializer
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: initializer
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- --
- /entrypoint-initializer.sh
env:
- name: DD_ADMIN_FIRST_NAME
value: Admin
- name: DD_ADMIN_LAST_NAME
value: User
- name: DD_ADMIN_MAIL
value: admin@defectdojo.local
- name: DD_ADMIN_USER
value: admin
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_INITIALIZE
value: "true"
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
image: allardkrings/defectdojo-django:1.0
imagePullPolicy: IfNotPresent
name: initializer
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: initializer-claim0
restartPolicy: Always
volumes:
- name: initializer-claim0
persistentVolumeClaim:
claimName: initializer-claim0
status: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
io.kompose.service: initializer-claim0
name: initializer-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: celeryworker
name: celeryworker
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: celeryworker
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: celeryworker
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-celery-worker.sh
env:
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
image: allardkrings/defectdojo-django:1.0
imagePullPolicy: IfNotPresent
name: celeryworker
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: celeryworker-claim0
- mountPath: /app/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: celeryworker-claim0
persistentVolumeClaim:
claimName: celeryworker-claim0
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
status: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: celeryworker-claim0
name: celeryworker-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: celerybeat
name: celerybeat
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: celerybeat
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: celerybeat
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-celery-beat.sh
env:
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
image: allardkrings/defectdojo-django:1.0
imagePullPolicy: IfNotPresent
name: celerybeat
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: celerybeat-claim0
restartPolicy: Always
volumes:
- name: celerybeat-claim0
persistentVolumeClaim:
claimName: celerybeat-claim0
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
io.kompose.service: celerybeat-claim0
name: celerybeat-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: redis
name: redis
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: redis
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: redis
spec:
containers:
- image: redis:7.2.4-alpine@sha256:a40e29800d387e3cf9431902e1e7a362e4d819233d68ae39380532c3310091ac
name: redis
resources: {}
# volumeMounts:
# - mountPath: /data
# name: defectdojo-redis
restartPolicy: Always
# volumes:
# - name: defectdojo-redis
# persistentVolumeClaim:
# claimName: defectdojo-redis-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-redis-pvc
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-redis-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/redis
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: redis
name: redis
namespace: defectdojo
spec:
ports:
- name: "6379"
port: 6379
targetPort: 6379
selector:
io.kompose.service: redis
status:
loadBalancer: {}

View File

@@ -0,0 +1,402 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: nginx
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: nginx
spec:
containers:
- env:
- name: NGINX_METRICS_ENABLED
value: "false"
- name: DD_UWSGI_HOST
value: "uwsgi.defectdojo"
- name: HTTP_AUTH_PASSWORD
value: "Defectdojo01@"
image: defectdojo/defectdojo-nginx
imagePullPolicy: IfNotPresent
name: nginx
ports:
- containerPort: 8080
- containerPort: 8443
resources: {}
volumeMounts:
- mountPath: /usr/share/nginx/html/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-media-pvc
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-media-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-media-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/media
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: nginx
name: nginx
namespace: defectdojo
spec:
ports:
- name: "8080"
port: 8080
targetPort: 8080
- name: "8443"
port: 8443
targetPort: 8443
selector:
io.kompose.service: nginx
status:
loadBalancer: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: uwsgi
name: uwsgi
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: uwsgi
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: uwsgi
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-uwsgi.sh
env:
- name: DD_ALLOWED_HOSTS
value: '*'
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_DEBUG
value: "False"
- name: DD_DJANGO_METRICS_ENABLED
value: "False"
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
image: defectdojo/defectdojo-django
imagePullPolicy: IfNotPresent
name: uwsgi
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: uwsgi-claim0
- mountPath: /app/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: uwsgi-claim0
persistentVolumeClaim:
claimName: uwsgi-claim0
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: uwsgi-claim0
name: uwsgi-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: uwsgi
name: uwsgi
namespace: defectdojo
spec:
ports:
- name: "3031"
port: 3031
targetPort: 3031
selector:
io.kompose.service: uwsgi
status:
loadBalancer: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: celeryworker
name: celeryworker
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: celeryworker
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: celeryworker
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-celery-worker.sh
env:
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
image: allardkrings/defectdojo-django:1.0
imagePullPolicy: IfNotPresent
name: celeryworker
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: celeryworker-claim0
- mountPath: /app/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: celeryworker-claim0
persistentVolumeClaim:
claimName: celeryworker-claim0
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
status: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: celeryworker-claim0
name: celeryworker-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: celerybeat
name: celerybeat
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: celerybeat
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: celerybeat
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-celery-beat.sh
env:
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
image: allardkrings/defectdojo-django:1.0
imagePullPolicy: IfNotPresent
name: celerybeat
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: celerybeat-claim0
restartPolicy: Always
volumes:
- name: celerybeat-claim0
persistentVolumeClaim:
claimName: celerybeat-claim0
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
io.kompose.service: celerybeat-claim0
name: celerybeat-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: redis
name: redis
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: redis
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: redis
spec:
containers:
- image: redis:7.2.4-alpine@sha256:a40e29800d387e3cf9431902e1e7a362e4d819233d68ae39380532c3310091ac
name: redis
resources: {}
# volumeMounts:
# - mountPath: /data
# name: defectdojo-redis
restartPolicy: Always
# volumes:
# - name: defectdojo-redis
# persistentVolumeClaim:
# claimName: defectdojo-redis-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-redis-pvc
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-redis-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/redis
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: redis
name: redis
namespace: defectdojo
spec:
ports:
- name: "6379"
port: 6379
targetPort: 6379
selector:
io.kompose.service: redis
status:
loadBalancer: {}

View File

@@ -0,0 +1,410 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: nginx
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: nginx
spec:
containers:
- env:
- name: NGINX_METRICS_ENABLED
value: "false"
- name: DD_UWSGI_HOST
value: "uwsgi.defectdojo"
- name: HTTP_AUTH_PASSWORD
value: "Defectdojo01@"
image: defectdojo/defectdojo-nginx
imagePullPolicy: IfNotPresent
name: nginx
ports:
- containerPort: 8080
- containerPort: 8443
resources: {}
volumeMounts:
- mountPath: /usr/share/nginx/html/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-media-pvc
namespace: defectdojo
spec:
storageClassName: ""
volumeName: defectdojo-media-pv
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-media-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/media
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: nginx
name: nginx
namespace: defectdojo
spec:
ports:
- name: "8080"
port: 8080
targetPort: 8080
- name: "8443"
port: 8443
targetPort: 8443
selector:
io.kompose.service: nginx
status:
loadBalancer: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: uwsgi
name: uwsgi
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: uwsgi
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: uwsgi
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-uwsgi.sh
env:
- name: DD_ALLOWED_HOSTS
value: '*'
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_DEBUG
value: "False"
- name: DD_DJANGO_METRICS_ENABLED
value: "False"
- name: DD_ASYNC_FINDING_IMPORT
value: "False"
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
- name: DD_ENABLE_AUDITLOG
value: "False"
image: defectdojo/defectdojo-django
imagePullPolicy: IfNotPresent
name: uwsgi
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: uwsgi-claim0
- mountPath: /app/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: uwsgi-claim0
persistentVolumeClaim:
claimName: uwsgi-claim0
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: uwsgi-claim0
name: uwsgi-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: uwsgi
name: uwsgi
namespace: defectdojo
spec:
ports:
- name: "3031"
port: 3031
targetPort: 3031
selector:
io.kompose.service: uwsgi
status:
loadBalancer: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: celeryworker
name: celeryworker
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: celeryworker
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: celeryworker
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-celery-worker.sh
env:
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
- name: DD_ENABLE_AUDITLOG
value: "False"
image: defectdojo/defectdojo-django
imagePullPolicy: IfNotPresent
name: celeryworker
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: celeryworker-claim0
- mountPath: /app/media
name: defectdojo-media
restartPolicy: Always
volumes:
- name: celeryworker-claim0
persistentVolumeClaim:
claimName: celeryworker-claim0
- name: defectdojo-media
persistentVolumeClaim:
claimName: defectdojo-media-pvc
status: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: celeryworker-claim0
name: celeryworker-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: celerybeat
name: celerybeat
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: celerybeat
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: celerybeat
spec:
containers:
- command:
- /wait-for-it.sh
- postgres16.postgres:5432
- -t
- "30"
- --
- /entrypoint-celery-beat.sh
env:
- name: DD_CELERY_BROKER_URL
value: redis://redis.defectdojo:6379/0
- name: DD_CREDENTIAL_AES_256_KEY
value: '&91a*agLqesc*0DJ+2*bAbsUZfR*4nLw'
- name: DD_DATABASE_URL
value: postgresql://defectdojo:defectdojo@postgres16.postgres:5432/defectdojo
- name: DD_SECRET_KEY
value: hhZCp@D28z!n@NED*yB!ROMt+WzsY*iq
- name: DD_ENABLE_AUDITLOG
value: "False"
image: defectdojo/defectdojo-django
imagePullPolicy: IfNotPresent
name: celerybeat
resources: {}
volumeMounts:
- mountPath: /app/docker/extra_settings
name: celerybeat-claim0
restartPolicy: Always
volumes:
- name: celerybeat-claim0
persistentVolumeClaim:
claimName: celerybeat-claim0
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
io.kompose.service: celerybeat-claim0
name: celerybeat-claim0
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: redis
name: redis
namespace: defectdojo
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: redis
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: redis
spec:
containers:
- image: redis:7.2.4-alpine@sha256:a40e29800d387e3cf9431902e1e7a362e4d819233d68ae39380532c3310091ac
name: redis
resources: {}
# volumeMounts:
# - mountPath: /data
# name: defectdojo-redis
restartPolicy: Always
# volumes:
# - name: defectdojo-redis
# persistentVolumeClaim:
# claimName: defectdojo-redis-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: defectdojo-redis-pvc
namespace: defectdojo
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: defectdojo-redis-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/defectdojo/redis
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: redis
name: redis
namespace: defectdojo
spec:
ports:
- name: "6379"
port: 6379
targetPort: 6379
selector:
io.kompose.service: redis
status:
loadBalancer: {}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,14 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: defectdojo-http
namespace: defectdojo
spec:
entryPoints:
- web
routes:
- match: Host(`defectdojo-dev.allarddcs.nl`)
kind: Rule
services:
- name: nginx
port: 8080

View File

@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: defectdojo-tls
namespace: defectdojo
spec:
entryPoints:
- websecure
routes:
- match: Host(`defectdojo-dev.allarddcs.nl`)
kind: Rule
services:
- name: nginx
port: 8080
tls:
certResolver: letsencrypt

5
dev/defectdojo/yaml/restart.sh Executable file
View File

@@ -0,0 +1,5 @@
microk8s kubectl rollout restart deployment -n defectdojo uwsgi
microk8s kubectl rollout restart deployment -n defectdojo celerybeat
microk8s kubectl rollout restart deployment -n defectdojo celeryworker
microk8s kubectl rollout restart deployment -n defectdojo celeryworker-high
microk8s kubectl rollout restart deployment -n defectdojo celeryworker-low

34
dev/deptrack/README.md Executable file
View File

@@ -0,0 +1,34 @@
#Installation
kubectl apply -f deptrack.yaml
Make sure the API URL matches the VIMEXX DNS settings: deptracka.alldcs.nl
You can check this with a "ping", as shown below.
The yaml must contain the setting:
- name: API_BASE_URL
value: 'https://deptracka-dev.alldcs.nl'
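A quick way to verify the DNS entry and the API URL (a sketch; it assumes the host resolves from where you test and uses Dependency-Track's unauthenticated /api/version endpoint):
nslookup deptracka-dev.alldcs.nl
curl -sk https://deptracka-dev.alldcs.nl/api/version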
#tekton configuration:
- go to the Dependency-Track menu -> configuration -> access-management -> teams
- look up the "automation" team and copy the api-key
- fill it in in the pipelinerun and in the gitea-trigger-template
- you also need to create a project with the correct version (see the check below)
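As a quick check (a sketch; the project name and version are placeholders), the copied api-key can be tested against the Dependency-Track REST API by looking up the project the pipeline uploads to:
curl -H "X-Api-Key: <api-key>" \
  "https://deptracka-dev.alldcs.nl/api/v1/project/lookup?name=<project>&version=<version>"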
#integration with defectdojo
- retrieve the API key v2 in DefectDojo (menu at the top right, under the person icon);
- fill it in in Dependency-Track under integrations -> defectdojo
- you also need to create properties (see the API sketch below):
  Attribute        Value
  Group Name       integrations
  Property Name    defectdojo.engagementId
  Property Value   the CI/CD engagement ID to upload findings to, noted in Step 3
  Property Type    STRING
See also: https://docs.dependencytrack.org/integrations/defectdojo/
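The same property can also be set through the Dependency-Track REST API instead of the UI (a sketch; the project UUID and engagement ID are placeholders, and the api-key is the one copied above):
curl -X PUT -H "X-Api-Key: <api-key>" -H "Content-Type: application/json" \
  -d '{"groupName":"integrations","propertyName":"defectdojo.engagementId","propertyValue":"<engagement-id>","propertyType":"STRING"}' \
  "https://deptracka-dev.alldcs.nl/api/v1/project/<project-uuid>/property"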
#ingressroutes
works with a TCP route for tls and http

View File

@@ -0,0 +1,19 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: dev-deptrack
title: Deptrack (dev)
description: Dependency Track instance running in Kubernetes
annotations:
backstage.io/kubernetes-label-selector: "app=deptrack-frontend"
links:
- url: https://github.com/AllardKrings/kubernetes/dev/deptrackback
title: deptrack-configuration
docs:
- url: ./README.md
spec:
type: service
lifecycle: production
owner: group:default/allarddcs
subcomponentOf: component:default/DEV-cluster

252
dev/deptrack/deptrack.old Normal file
View File

@@ -0,0 +1,252 @@
apiVersion: v1
kind: Namespace
metadata:
name: deptrack
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deptrack-apiserver
namespace: deptrack
labels:
app: deptrack-apiserver
spec:
replicas: 1
selector:
matchLabels:
app: deptrack-apiserver
template:
metadata:
labels:
app: deptrack-apiserver
spec:
containers:
- name: deptrack-apiserver
image: dependencytrack/apiserver
ports:
- containerPort: 8080
env:
- name: ALPINE_DATABASE_MODE
value: 'external'
- name: ALPINE_DATABASE_URL
value: 'jdbc:postgresql://postgres13.postgres:5432/deptrack'
# value: 'jdbc:postgresql://192.168.2.233:5432/deptrack'
- name: ALPINE_DATABASE_DRIVER
value: 'org.postgresql.Driver'
- name: ALPINE_DATABASE_USERNAME
value: 'deptrack'
- name: ALPINE_DATABASE_PASSWORD
value: 'deptrack'
- name: ALPINE_DATABASE_POOL_ENABLED
value: 'true'
- name: ALPINE_DATABASE_POOL_MAX_SIZE
value: '20'
- name: ALPINE_DATABASE_POOL_MIN_IDLE
value: '10'
- name: ALPINE_DATABASE_POOL_IDLE_TIMEOUT
value: '300000'
- name: ALPINE_DATABASE_POOL_MAX_LIFETIME
value: '600000'
volumeMounts:
- mountPath: /data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: deptrack-data-pvc
---
apiVersion: v1
kind: Service
metadata:
name: deptrack-apiserver
namespace: deptrack
labels:
name: deptrack-apiserver
spec:
type: ClusterIP
ports:
- port: 8080
name: deptrack-apiserver
selector:
app: deptrack-apiserver
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deptrack-frontend
namespace: deptrack
labels:
app: deptrack-frontend
spec:
replicas: 1
selector:
matchLabels:
app: deptrack-frontend
template:
metadata:
labels:
app: deptrack-frontend
spec:
containers:
- name: deptrack-frontend
image: dependencytrack/frontend
ports:
- containerPort: 8080
env:
- name: API_BASE_URL
value: 'https://deptracka-dev.allarddcs.nl'
volumeMounts:
- mountPath: /app/static/config.json
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: deptrack-config-pvc
---
apiVersion: v1
kind: Service
metadata:
name: deptrack-frontend
namespace: deptrack
labels:
name: deptrack-frontend
spec:
type: ClusterIP
ports:
- port: 8080
name: deptrack-frontend
selector:
app: deptrack-frontend
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: deptrackf-http
namespace: deptrack
spec:
entryPoints:
- web
routes:
- match: Host(`deptrackf-dev.allarddcs.nl`)
kind: Rule
services:
- name: deptrack-frontend
port: 8080
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: deptracka-http
namespace: deptrack
spec:
entryPoints:
- web
routes:
- match: Host(`deptracka-dev.allarddcs.nl`)
kind: Rule
services:
- name: deptrack-apiserver
port: 8080
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: deptrackf-tls
namespace: deptrack
spec:
entryPoints:
- websecure
routes:
- match: Host(`deptrackf-dev.allarddcs.nl`)
kind: Rule
services:
- name: deptrack-frontend
port: 8080
tls:
certResolver: letsencrypt
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: deptracka-tls
namespace: deptrack
spec:
entryPoints:
- websecure
routes:
- match: Host(`deptracka-dev.allarddcs.nl`)
kind: Rule
services:
- name: deptrack-apiserver
port: 8080
tls:
certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: deptrack-data-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/deptrack/data
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: deptrack-data-pvc
namespace: deptrack
spec:
storageClassName: ""
volumeName: deptrack-data-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: deptrack-config-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/deptrack/config
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: deptrack-config-pvc
namespace: deptrack
spec:
storageClassName: ""
volumeName: deptrack-config-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

292
dev/deptrack/deptrack.yaml Normal file
View File

@@ -0,0 +1,292 @@
apiVersion: v1
kind: Namespace
metadata:
name: deptrack
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deptrack-apiserver
namespace: deptrack
labels:
app: deptrack-apiserver
spec:
replicas: 1
selector:
matchLabels:
app: deptrack-apiserver
template:
metadata:
labels:
app: deptrack-apiserver
spec:
initContainers:
- name: init-deptrack
image: dependencytrack/apiserver
command:
- sh
- -c
- |
if [ ! -d /data/.dependency-track ] || [ -z "$(ls -A /data/.dependency-track)" ]; then
echo "Seeding /data/.dependency-track from container image..."
mkdir -p /data/.dependency-track
cp -r /opt/dependency-track/.dependency-track/* /data/.dependency-track/
echo "Seeding complete."
else
echo "/data/.dependency-track already populated, skipping."
fi
volumeMounts:
- name: data
mountPath: /data
containers:
- name: deptrack-apiserver
image: dependencytrack/apiserver
ports:
- containerPort: 8080
env:
- name: ALPINE_DATABASE_MODE
value: 'external'
- name: ALPINE_DATABASE_URL
value: 'jdbc:postgresql://postgres13.postgres:5432/deptrack'
- name: ALPINE_DATABASE_DRIVER
value: 'org.postgresql.Driver'
- name: ALPINE_DATABASE_USERNAME
value: 'deptrack'
- name: ALPINE_DATABASE_PASSWORD
value: 'deptrack'
- name: ALPINE_DATABASE_POOL_ENABLED
value: 'true'
- name: ALPINE_DATABASE_POOL_MAX_SIZE
value: '20'
- name: ALPINE_DATABASE_POOL_MIN_IDLE
value: '10'
- name: ALPINE_DATABASE_POOL_IDLE_TIMEOUT
value: '300000'
- name: ALPINE_DATABASE_POOL_MAX_LIFETIME
value: '600000'
volumeMounts:
- mountPath: /data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: deptrack-data-pvc
---
apiVersion: v1
kind: Service
metadata:
name: deptrack-apiserver
namespace: deptrack
labels:
name: deptrack-apiserver
spec:
type: ClusterIP
ports:
- port: 8080
name: deptrack-apiserver
selector:
app: deptrack-apiserver
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deptrack-frontend
namespace: deptrack
labels:
app: deptrack-frontend
spec:
replicas: 1
selector:
matchLabels:
app: deptrack-frontend
template:
metadata:
labels:
app: deptrack-frontend
spec:
initContainers:
- name: init-frontend-config
image: dependencytrack/frontend
command:
- sh
- -c
- |
echo "Init container starting..."
# Make sure temporary mount exists
mkdir -p /mnt/config
# Copy config.json from image to PVC if it doesn't exist
if [ ! -f /mnt/config/config.json ]; then
echo "Seeding config.json from container image..."
cp /opt/owasp/dependency-track-frontend/static/config.json /mnt/config/config.json
echo "Seeding complete."
else
echo "config.json already exists on PVC, skipping."
fi
volumeMounts:
- name: config
mountPath: /mnt/config
containers:
- name: deptrack-frontend
image: dependencytrack/frontend
ports:
- containerPort: 8080
env:
- name: API_BASE_URL
value: 'https://deptracka-dev.allarddcs.nl'
volumeMounts:
- name: config
mountPath: /opt/owasp/dependency-track-frontend/static/config.json
subPath: config.json
volumes:
- name: config
persistentVolumeClaim:
claimName: deptrack-config-pvc
---
apiVersion: v1
kind: Service
metadata:
name: deptrack-frontend
namespace: deptrack
labels:
name: deptrack-frontend
spec:
type: ClusterIP
ports:
- port: 8080
name: deptrack-frontend
selector:
app: deptrack-frontend
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: deptrackf-http
namespace: deptrack
spec:
entryPoints:
- web
routes:
- match: Host(`deptrackf-dev.allarddcs.nl`)
kind: Rule
services:
- name: deptrack-frontend
port: 8080
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: deptracka-http
namespace: deptrack
spec:
entryPoints:
- web
routes:
- match: Host(`deptracka-dev.allarddcs.nl`)
kind: Rule
services:
- name: deptrack-apiserver
port: 8080
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: deptrackf-tls
namespace: deptrack
spec:
entryPoints:
- websecure
routes:
- match: Host(`deptrackf-dev.allarddcs.nl`)
kind: Rule
services:
- name: deptrack-frontend
port: 8080
tls:
certResolver: letsencrypt
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: deptracka-tls
namespace: deptrack
spec:
entryPoints:
- websecure
routes:
- match: Host(`deptracka-dev.allarddcs.nl`)
kind: Rule
services:
- name: deptrack-apiserver
port: 8080
tls:
certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: deptrack-data-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/deptrack/data
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: deptrack-data-pvc
namespace: deptrack
spec:
storageClassName: ""
volumeName: deptrack-data-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: deptrack-config-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/deptrack/config
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: deptrack-config-pvc
namespace: deptrack
spec:
storageClassName: ""
volumeName: deptrack-config-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,19 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: dev-dnsutils
title: DNSUtils (dev)
description: DNSUtils instance running in Kubernetes
annotations:
backstage.io/kubernetes-label-selector: "app=dnsutils"
links:
- url: https://github.com/AllardKrings/kubernetes/dev/dnsutils
title: dnsutils-configuration
docs:
- url: ./README.md
spec:
type: service
lifecycle: production
owner: group:default/allarddcs
subcomponentOf: component:default/DEV-cluster

19
dev/dnsutils/dnsutils.yaml Executable file
View File

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Namespace
metadata:
name: dnsutils
---
apiVersion: v1
kind: Pod
metadata:
name: dnsutils
namespace: dnsutils
spec:
containers:
- name: dnsutils
image: registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3
command:
- sleep
- "infinity"
imagePullPolicy: IfNotPresent
restartPolicy: Always

View File

@@ -0,0 +1,19 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: dev-drupal
title: Drupal (dev)
description: Drupal instance running in Kubernetes
annotations:
backstage.io/kubernetes-label-selector: "app=drupal"
links:
- url: https://github.com/AllardKrings/kubernetes/dev/drupal
title: drupal-configuration
docs:
- url: ./README.md
spec:
type: service
lifecycle: production
owner: group:default/allarddcs
subcomponentOf: component:default/DEV-cluster

133
dev/drupal/drupal.yaml Normal file
View File

@@ -0,0 +1,133 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: drupal
namespace: drupal
labels:
app: drupal
spec:
replicas: 1
selector:
matchLabels:
app: drupal
template:
metadata:
labels:
app: drupal
spec:
initContainers:
- name: init-sites-volume
image: drupal
command: ['/bin/bash', '-c']
args: ['chown www-data:www-data /var/www/html/sites -R']
volumeMounts:
- name: drupal-data
mountPath: /var/www/html/sites
subPath: sites
containers:
- name: drupal
image: drupal
imagePullPolicy: Always
ports:
- containerPort: 80
volumeMounts:
- name: drupal-data
mountPath: /var/www/html/modules
subPath: modules
- name: drupal-data
mountPath: /var/www/html/profiles
subPath: profiles
- name: drupal-data
mountPath: /var/www/html/themes
subPath: themes
- name: drupal-data
mountPath: /var/www/html/sites
subPath: sites
volumes:
- name: drupal-data
persistentVolumeClaim:
claimName: drupal-pvc
---
apiVersion: v1
kind: Service
metadata:
name: drupal
namespace: drupal
labels:
app: drupal
spec:
sessionAffinity: None
ports:
- protocol: TCP
port: 80
selector:
app: drupal
type: LoadBalancer
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: drupal-tls
namespace: drupal
spec:
entryPoints:
- websecure
routes:
- match: Host(`drupal-dev.alldcs.nl`)
kind: Rule
services:
- name: drupal
port: 80
tls:
certResolver: letsencrypt
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: drupal-http
namespace: drupal
spec:
entryPoints:
- web
routes:
- match: Host(`drupal-dev.alldcs.nl`)
kind: Rule
services:
- name: drupal
port: 80
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: drupal-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/drupal/riscv
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: drupal-pvc
namespace: drupal
spec:
storageClassName: ""
volumeName: drupal-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

View File

@@ -0,0 +1,27 @@
INSTALLING THE CRDs:
The guide comes from:
www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html
Install the CRDs:
kubectl create -f https://download.elastic.co/downloads/eck/2.5.0/crds.yaml
customresourcedefinition.apiextensions.k8s.io/agents.agent.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/apmservers.apm.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/beats.beat.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/elasticmapsservers.maps.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/elasticsearchautoscalers.autoscaling.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/elasticsearches.elasticsearch.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/enterprisesearches.enterprisesearch.k8s.elastic.co created
customresourcedefinition.apiextensions.k8s.io/kibanas.kibana.k8s.elastic.co created
I added a load balancer (kibana-lb.yaml); it does not work from outside (relative url?), but it does work on the nodeport.
USER/PASSWORD:
user: elastic
password: retrieve it with:
kubectl get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode; echo
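If the load balancer is not reachable from outside, Kibana can still be tested locally with a port-forward (a sketch; it assumes the default quickstart naming from the ECK guide, matching the quickstart-kb-http service referenced in the ingressroutes below):
kubectl port-forward service/quickstart-kb-http 5601
Then browse to https://localhost:5601 and log in with the elastic user and the password retrieved above.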

View File

@@ -0,0 +1,268 @@
# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: elastic-agent
namespace: kube-system
labels:
app: elastic-agent
spec:
selector:
matchLabels:
app: elastic-agent
template:
metadata:
labels:
app: elastic-agent
spec:
# Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes.
# Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: elastic-agent
hostNetwork: true
# 'hostPID: true' enables the Elastic Security integration to observe all process exec events on the host.
# Sharing the host process ID namespace gives visibility of all processes running on the same host.
hostPID: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: elastic-agent
image: docker.elastic.co/beats/elastic-agent:8.5.3
env:
# Set to 1 for enrollment into Fleet server. If not set, Elastic Agent is run in standalone mode
- name: FLEET_ENROLL
value: "1"
# Set to true to communicate with Fleet with either insecure HTTP or unverified HTTPS
- name: FLEET_INSECURE
value: "true"
# Fleet Server URL to enroll the Elastic Agent into
# FLEET_URL can be found in Kibana, go to Management > Fleet > Settings
- name: FLEET_URL
value: "https://fleet-server:8220"
# Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens)
# If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed
- name: FLEET_ENROLLMENT_TOKEN
value: "token-id"
- name: KIBANA_HOST
value: "http://kibana:5601"
# The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet
- name: KIBANA_FLEET_USERNAME
value: "elastic"
# The basic authentication password used to connect to Kibana and retrieve a service_token to enable Fleet
- name: KIBANA_FLEET_PASSWORD
value: "changeme"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
securityContext:
runAsUser: 0
resources:
limits:
memory: 500Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: proc
mountPath: /hostfs/proc
readOnly: true
- name: cgroup
mountPath: /hostfs/sys/fs/cgroup
readOnly: true
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
- name: etc-full
mountPath: /hostfs/etc
readOnly: true
- name: var-lib
mountPath: /hostfs/var/lib
readOnly: true
- name: etc-mid
mountPath: /etc/machine-id
readOnly: true
volumes:
- name: proc
hostPath:
path: /proc
- name: cgroup
hostPath:
path: /sys/fs/cgroup
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: varlog
hostPath:
path: /var/log
# The following volumes are needed for Cloud Security Posture integration (cloudbeat)
# If you are not using this integration, then these volumes and the corresponding
# mounts can be removed.
- name: etc-full
hostPath:
path: /etc
- name: var-lib
hostPath:
path: /var/lib
# Mount /etc/machine-id from the host to determine host ID
# Needed for Elastic Security integration
- name: etc-mid
hostPath:
path: /etc/machine-id
type: File
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: elastic-agent
subjects:
- kind: ServiceAccount
name: elastic-agent
namespace: kube-system
roleRef:
kind: ClusterRole
name: elastic-agent
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
namespace: kube-system
name: elastic-agent
subjects:
- kind: ServiceAccount
name: elastic-agent
namespace: kube-system
roleRef:
kind: Role
name: elastic-agent
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: elastic-agent-kubeadm-config
namespace: kube-system
subjects:
- kind: ServiceAccount
name: elastic-agent
namespace: kube-system
roleRef:
kind: Role
name: elastic-agent-kubeadm-config
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elastic-agent
labels:
k8s-app: elastic-agent
rules:
- apiGroups: [""]
resources:
- nodes
- namespaces
- events
- pods
- services
- configmaps
# Needed for cloudbeat
- serviceaccounts
- persistentvolumes
- persistentvolumeclaims
verbs: ["get", "list", "watch"]
# Enable this rule only if planing to use kubernetes_secrets provider
#- apiGroups: [""]
# resources:
# - secrets
# verbs: ["get"]
- apiGroups: ["extensions"]
resources:
- replicasets
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources:
- statefulsets
- deployments
- replicasets
- daemonsets
verbs: ["get", "list", "watch"]
- apiGroups:
- ""
resources:
- nodes/stats
verbs:
- get
- apiGroups: [ "batch" ]
resources:
- jobs
- cronjobs
verbs: [ "get", "list", "watch" ]
# Needed for apiserver
- nonResourceURLs:
- "/metrics"
verbs:
- get
# Needed for cloudbeat
- apiGroups: ["rbac.authorization.k8s.io"]
resources:
- clusterrolebindings
- clusterroles
- rolebindings
- roles
verbs: ["get", "list", "watch"]
# Needed for cloudbeat
- apiGroups: ["policy"]
resources:
- podsecuritypolicies
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: elastic-agent
# Should be the namespace where elastic-agent is running
namespace: kube-system
labels:
k8s-app: elastic-agent
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs: ["get", "create", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: elastic-agent-kubeadm-config
namespace: kube-system
labels:
k8s-app: elastic-agent
rules:
- apiGroups: [""]
resources:
- configmaps
resourceNames:
- kubeadm-config
verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: elastic-agent
namespace: kube-system
labels:
k8s-app: elastic-agent
---

View File

@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: dev-elasticsearch-kibana
title: Elasticsearch-kibana (dev)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

View File

@@ -0,0 +1,11 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: quickstart
spec:
version: 8.5.3
nodeSets:
- name: default
count: 1
config:
node.store.allow_mmap: false

View File

@@ -0,0 +1,31 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: quickstart-http-dialdcs
spec:
entryPoints:
- web
routes:
- match: Host("elastic.dialdcs.com")
kind: Rule
middlewares:
- name: redirect-to-https
services:
- name: quickstart-kb-http
port: 5601
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: quickstart-http-alldcs
spec:
entryPoints:
- web
routes:
- match: Host("elastic.alldcs.nl")
kind: Rule
middlewares:
- name: redirect-to-https
services:
- name: quickstart-kb-http
port: 5601

Some files were not shown because too many files have changed in this diff.