initial commit

allard
2025-11-23 18:58:51 +01:00
commit 376a944abc
1553 changed files with 314731 additions and 0 deletions

riscv/README.md Normal file

@@ -0,0 +1 @@
These are all services on the PROD cluster:


@@ -0,0 +1,10 @@
#Ingressroutes:
Apparently you have to configure an IngressRouteTCP via the traefik.io API.
In short: no TLS termination by Traefik; traffic on entrypoint 443/websecure is forwarded unchanged to the argo-server.
#About the workflow
You have to create a PVC to pass data between the steps.
For the build step, the Docker directory has to be mounted into the Maven container.
For the deploy step, a kubeconfig that points to the Kubernetes instance on the host has to be mounted (see the sketch below).
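
A minimal sketch of those three mounts as they would appear in a Workflow spec (volume and claim names follow the manifests elsewhere in this commit):
volumes:
  - name: workdir
    persistentVolumeClaim:
      claimName: argo-workflows-pvc   # shared scratch space between the steps
  - name: dockersock
    hostPath:
      path: /var/run/docker.sock      # gives the Maven build step access to the host Docker daemon
  - name: kubeconfig
    secret:
      secretName: kubeconfig-secret   # lets the deploy step reach the host's Kubernetes API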

File diff suppressed because it is too large


@@ -0,0 +1,31 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: argo-workflows-pv
  labels:
    type: local
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/argo-workflows/riscv
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: argo-workflows-pvc
  namespace: argo
spec:
  storageClassName: ""
  volumeName: argo-workflows-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-argo-workflows
  title: Argo-workflows (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml


@@ -0,0 +1,4 @@
kubectl create secret generic docker-creds \
--from-literal=username=allardkrings@gmail.com \
--from-literal=password='Kubernetes01@' \
-n argo


@@ -0,0 +1,4 @@
kubectl create secret generic gitea-creds \
--from-literal=username=allard \
--from-literal=password='Gitea01@' \
-n argo


@@ -0,0 +1,4 @@
kubectl create secret generic github-creds \
--from-literal=username=allardkrings@gmail.com \
--from-literal=password='Kubernetes01@' \
-n argo


@@ -0,0 +1 @@
kubectl create secret generic kubeconfig-secret --from-file=config=/home/ubuntu/.kube/config
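
A step can then mount this secret so kubectl finds a config for the host cluster; a minimal sketch (the mount path /root/.kube is an assumption, not taken from these manifests):
volumes:
  - name: kubeconfig
    secret:
      secretName: kubeconfig-secret
# in the step's container spec; the secret key "config" becomes /root/.kube/config
volumeMounts:
  - name: kubeconfig
    mountPath: /root/.kube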


@@ -0,0 +1,4 @@
kubectl create secret generic nexus-credentials \
--from-literal=username=admin \
--from-literal=password='Nexus01@' \
-n argo


@@ -0,0 +1,17 @@
#apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
  name: argo-workflows-tcp-tls
  namespace: argo
spec:
  entryPoints:
    - websecure
  routes:
    - match: HostSNI(`argo-riscv.allarddcs.nl`)
      services:
        - name: argo-server
          port: 2746
  tls:
    passthrough: true


@@ -0,0 +1,38 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argo-workflow
  namespace: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argo-workflow-role
  namespace: argo
rules:
  # Allow managing Deployments
  - apiGroups: ["apps"]
    resources: ["deployments"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
  # Allow managing Services
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
  # Allow managing Traefik IngressRoutes
  - apiGroups: ["traefik.io"]
    resources: ["ingressroutes"]
    verbs: ["get", "list", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: argo-workflow-rolebinding
  namespace: argo
subjects:
  - kind: ServiceAccount
    name: argo-workflow
    namespace: argo
roleRef:
  kind: Role
  name: argo-workflow-role
  apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,72 @@
apiVersion: argoproj.io/v1alpha1
kind: WorkflowTemplate
metadata:
  name: clone-build-deploy
  namespace: argo
spec:
  entrypoint: main
  volumes:
    - name: workdir
      persistentVolumeClaim:
        claimName: argo-workflows-pvc
    - name: dockersock
      hostPath:
        path: /var/run/docker.sock
    - name: kubeconfig
      secret:
        secretName: kubeconfig-secret
  templates:
    - name: main
      steps:
        - - name: build
            template: git-clone
        - - name: deploy
            template: deploy-app
    - name: git-clone
      container:
        image: allardkrings/riscv64-maven
        command: ["sh", "-c"]
        args:
          - |
            echo "Cleaning up /src directory..." && \
            rm -rf /src/* /src/.* 2>/dev/null || true && \
            git clone http://allard:Gitea01%40@gitea.gitea.svc.cluster.local:3000/allard/olproperties.git /src && \
            cd /src && \
            git status && \
            ls -la && \
            mvn clean install
        env:
          - name: USERNAME
            valueFrom:
              secretKeyRef:
                name: gitea-creds
                key: username
          - name: PASSWORD
            valueFrom:
              secretKeyRef:
                name: gitea-creds
                key: password
        volumeMounts:
          - name: workdir
            mountPath: /src
          - name: dockersock
            mountPath: /var/run/docker.sock
    - name: deploy-app
      serviceAccountName: argo-workflow
      container:
        image: allardkrings/riscv64-kubectl
        command: ["sh", "-c"]
        args:
          - |
            echo "Contents of /src:" && ls -la /src && \
            echo "Checking deployment.yaml:" && cat /src/deployment.yaml && \
            echo "Deploying application..." && \
            kubectl apply -f /src/deployment.yaml --validate=false
        volumeMounts:
          - name: workdir
            mountPath: /src
      continueOn:
        failed: true
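
A Workflow can run this template by reference; a minimal sketch using the standard workflowTemplateRef field (the argo CLI equivalent would be `argo submit --from workflowtemplate/clone-build-deploy -n argo`):
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: clone-build-deploy-
  namespace: argo
spec:
  workflowTemplateRef:
    name: clone-build-deploy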


@@ -0,0 +1,51 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: dag-target-
  namespace: argo
  name: dag-target
spec:
  entrypoint: dag-target
  arguments:
    parameters:
      - name: target
        value: E
  templates:
    - name: dag-target
      dag:
        target: "{{workflow.parameters.target}}"
        tasks:
          - name: A
            template: echo
            arguments:
              parameters: [{name: message, value: A}]
          - name: B
            depends: "A"
            template: echo
            arguments:
              parameters: [{name: message, value: B}]
          - name: C
            depends: "A"
            template: echo
            arguments:
              parameters: [{name: message, value: C}]
          - name: D
            depends: "B && C"
            template: echo
            arguments:
              parameters: [{name: message, value: D}]
          - name: E
            depends: "D"
            template: echo
            arguments:
              parameters: [{name: message, value: E}]
    - name: echo
      inputs:
        parameters:
          - name: message
      container:
        image: riscv64/alpine
        command: [echo, "{{inputs.parameters.message}}"]
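
Because dag.target is read from a workflow parameter, submitting with a different value prunes the graph to that node and its dependencies; for example (assuming this manifest is saved as dag-target.yaml):
argo submit dag-target.yaml -p target=D -n argo   # runs only A, B, C and D, skipping E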


@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: WorkflowTemplate
metadata:
  name: git-clone-template
spec:
  entrypoint: git-clone
  templates:
    - name: git-clone
      inputs:
        artifacts:
          - name: argo-source
            path: /src
            git:
              repo: https://gitea-riscv/allard/olproperties.git
              usernameSecret:
                name: github-creds
                key: username
              passwordSecret:
                name: github-creds
                key: password
      container:
        image: allardkrings/riscv64-maven
        command: ["sh", "-c"]
        args: ["git status && ls && cat VERSION"]
        workingDir: /src
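
Other workflows can call into this WorkflowTemplate with a templateRef (standard Argo field); a minimal sketch of such a step:
steps:
  - - name: clone
      templateRef:
        name: git-clone-template
        template: git-clone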


@@ -0,0 +1,26 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: git-clone-
  namespace: argo
spec:
  entrypoint: git-clone
  templates:
    - name: git-clone
      inputs:
        artifacts:
          - name: argo-source
            path: /src
            git:
              repo: gitea.gitea.svc.cluster.local/allard/olproperties.git
              usernameSecret:
                name: github-creds
                key: username
              passwordSecret:
                name: github-creds
                key: password
      container:
        image: allardkrings/riscv64-maven
        command: ["sh", "-c"]
        args: ["git status && ls && cat VERSION"]
        workingDir: /src


@@ -0,0 +1,27 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: input-artifact-git-
  name: input-artifact-git
  namespace: argo
spec:
  entrypoint: git-clone
  templates:
    - name: git-clone
      inputs:
        artifacts:
          - name: argo-source
            path: /src
            git:
              repo: https://gitea-riscv/allard/olproperties.git
              usernameSecret:
                name: github-creds
                key: username
              passwordSecret:
                name: github-creds
                key: password
      container:
        image: allardkrings/riscv64-maven
        command: [sh, -c]
        args: ["git status && ls && cat VERSION"]
        workingDir: /src

riscv/catalog-info.yaml Normal file

@@ -0,0 +1,36 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: RISCV-cluster
  namespace: default
  description: All deployments of the RISCV cluster on github
  annotations:
    backstage.io/techdocs-ref: dir:.
  links:
    - url: https://github.com/AllardKrings/kubernetes
      title: AllardDCS Kubernetes Configuration
  docs:
    - url: ./README.md
spec:
  type: service
  lifecycle: production
  owner: group:default/allarddcs
  - ./nexus/catalog-info.yaml
  - ./nginx/catalog-info.yaml
  - ./olproperties/catalog-info.yaml
  - ./traefik/catalog-info.yaml
  - ./openliberty/catalog-info.yaml
  - ./gitea/catalog-info.yaml
  - ./argo-workflows/catalog-info.yaml
  - ./postgres14/catalog-info.yaml
  - ./dnsutils/catalog-info.yaml
  - ./nextcloud/catalog-info.yaml
  - ./monica/catalog-info.yaml
  - ./zabbix/catalog-info.yaml
  - ./drupal/catalog-info.yaml
  - ./jenkins/catalog-info.yaml
  - ./mariadb/catalog-info.yaml
  - ./kubernetes/catalog-info.yaml
  - ./pgadmin/catalog-info.yaml
  - ./joomla/catalog-info.yaml
  - ./phpmyadmin/catalog-info.yaml


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-dnsutils
  title: Dnsutils (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml


@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  name: dnsutils
spec:
  containers:
    - name: dnsutils
      image: allardkrings/riscv64-dnsutils:1.0
      command:
        - sleep
        - "infinity"
      imagePullPolicy: IfNotPresent
  restartPolicy: Always

riscv/drupal/README.md Normal file

@@ -0,0 +1,2 @@
User: admin
Password: DrupalDrupal01@


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-drupal
  title: Drupal (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml

riscv/drupal/drupal.bak Normal file

@@ -0,0 +1,135 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: drupal
  namespace: drupal
  labels:
    app: drupal
spec:
  replicas: 1
  selector:
    matchLabels:
      app: drupal
  template:
    metadata:
      labels:
        app: drupal
    spec:
      containers:
        - name: drupal
          image: riscv64/drupal:11.1.2-php8.3-fpm-alpine3.21
          imagePullPolicy: Always
          env:
            - name: MYSQL_DATABASE
              value: drupal
            - name: MYSQL_USER
              value: drupal
            - name: MYSQL_PASSWORD
              value: DRUPAL
            - name: MYSQL_ROOT_PASSWORD
              value: password
          ports:
            - containerPort: 9000
          volumeMounts:
            - name: drupal-data
              mountPath: /var/www/html
        - name: nginx
          image: riscv64/nginx:1.27.4-alpine
          ports:
            - containerPort: 80
          volumeMounts:
            - name: drupal-data
              mountPath: /var/www/html
            - name: nginx-config
              mountPath: /etc/nginx/conf.d/default.conf
              subPath: default.conf
      volumes:
        - name: drupal-data
          persistentVolumeClaim:
            claimName: drupal-pvc
        - name: nginx-config
          configMap:
            name: nginx-config
---
apiVersion: v1
kind: Service
metadata:
  name: drupal
  namespace: drupal
  labels:
    app: drupal
spec:
  selector:
    app: drupal
  type: NodePort
  sessionAffinity: None
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: drupal-tls
  namespace: drupal
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`drupal-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: drupal
          port: 80
  tls:
    certResolver: letsencrypt
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: drupal-http
  namespace: drupal
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`drupal-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: drupal
          port: 80
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: drupal-pv
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/drupal/riscv
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: drupal-pvc
  namespace: drupal
spec:
  storageClassName: ""
  volumeName: drupal-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi

riscv/drupal/drupal.yaml Normal file

@@ -0,0 +1,135 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: drupal
  namespace: drupal
  labels:
    app: drupal
spec:
  replicas: 1
  selector:
    matchLabels:
      app: drupal
  template:
    metadata:
      labels:
        app: drupal
    spec:
      containers:
        - name: drupal
          image: riscv64/drupal:fpm
          imagePullPolicy: Always
          env:
            - name: MYSQL_DATABASE
              value: drupal
            - name: MYSQL_USER
              value: drupal
            - name: MYSQL_PASSWORD
              value: DRUPAL
            - name: MYSQL_ROOT_PASSWORD
              value: password
          ports:
            - containerPort: 9000
          volumeMounts:
            - name: drupal-data
              mountPath: /opt/drupal/web
        - name: nginx
          image: riscv64/nginx:1.27.4-alpine
          ports:
            - containerPort: 80
          volumeMounts:
            - name: drupal-data
              mountPath: /opt/drupal/web
            - name: nginx-config
              mountPath: /etc/nginx/conf.d/default.conf
              subPath: default.conf
      volumes:
        - name: drupal-data
          persistentVolumeClaim:
            claimName: drupal-pvc
        - name: nginx-config
          configMap:
            name: nginx-config
---
apiVersion: v1
kind: Service
metadata:
  name: drupal
  namespace: drupal
  labels:
    app: drupal
spec:
  selector:
    app: drupal
  type: NodePort
  sessionAffinity: None
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: drupal-tls
  namespace: drupal
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`drupal-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: drupal
          port: 80
  tls:
    certResolver: letsencrypt
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: drupal-http
  namespace: drupal
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`drupal-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: drupal
          port: 80
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: drupal-pv
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/drupal/riscv
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: drupal-pvc
  namespace: drupal
spec:
  storageClassName: ""
  volumeName: drupal-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi


@@ -0,0 +1,26 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
  namespace: drupal
data:
  default.conf: |
    server {
        listen 80;
        server_name _;
        root /opt/drupal/web;
        index index.php index.html index.htm;
        location / {
            try_files $uri $uri/ /index.php?$query_string;
        }
        location ~ \.php$ {
            include fastcgi_params;
            fastcgi_pass 127.0.0.1:9000;
            fastcgi_index index.php;
            fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
            fastcgi_param DOCUMENT_ROOT /opt/drupal/web;
        }
    }

riscv/gitea/README.md Normal file

@@ -0,0 +1,2 @@
user: allard
password: Gitea01@


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-gitea
  title: Gitea (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml

riscv/gitea/gitea.yaml Normal file

@@ -0,0 +1,119 @@
apiVersion: v1
kind: Service
metadata:
  name: gitea
  namespace: "gitea"
  labels:
    app.kubernetes.io/instance: gitea
    app.kubernetes.io/name: gitea
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: http
      port: 3000
      targetPort: http
      nodePort: null
    - name: ssh
      port: 22
      targetPort: ssh
      nodePort: null
  selector:
    app.kubernetes.io/instance: gitea
    app.kubernetes.io/name: gitea
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitea
  namespace: "gitea"
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: gitea
  replicas: 1
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: gitea
        app.kubernetes.io/name: gitea
    spec:
      containers:
        - name: gitea
          image: allardkrings/riscv64-gitea:1.21.7
          env:
            - name: GITEA__database__DB_TYPE
              value: mysql
            - name: GITEA__database__HOST
              value: mariadb.mariadb:3306
            - name: GITEA__database__NAME
              value: gitea
            - name: GITEA__database__USER
              value: gitea
            - name: GITEA__database__PASSWD
              value: gitea
          ports:
            - name: http
              containerPort: 3000
            - name: ssh
              containerPort: 2222
          volumeMounts:
            - name: gitea-data
              mountPath: /data
      volumes:
        - name: gitea-data
          persistentVolumeClaim:
            claimName: gitea-pvc
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: gitea-tls
  namespace: gitea
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`gitea-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: gitea
          port: 3000
  tls:
    certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gitea-pv
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/gitea-riscv
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea-pvc
  namespace: gitea
spec:
  storageClassName: ""
  volumeName: gitea-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi

riscv/grafana/README.md Executable file

@@ -0,0 +1,19 @@
#raw material, in case it becomes available on riscv
INSTALLATION:
kubectl apply -f grafana-pv.yaml
kubectl apply -f grafana-pvc.yaml
kubectl create -f grafana-datasource-config.yaml
kubectl create -f deployment.yaml
kubectl create -f service.yaml
# kubernetes-grafana
Read about the grafana implementation on Kubernetes here https://devopscube.com/setup-grafana-kubernetes/


@@ -0,0 +1,21 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-datasources
  namespace: observability
data:
  prometheus.yaml: |-
    {
      "apiVersion": 1,
      "datasources": [
        {
          "access": "proxy",
          "editable": true,
          "name": "prometheus",
          "orgId": 1,
          "type": "prometheus",
          "url": "http://prometheus-service.monitoring.svc:8080",
          "version": 1
        }
      ]
    }

riscv/grafana/grafana-pv.yaml Executable file

@@ -0,0 +1,20 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: grafana-pv
  namespace: monitoring
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.40.100
    path: /mnt/nfs_share/grafana
    readOnly: false

riscv/grafana/grafana-pvc.yaml Executable file

@@ -0,0 +1,19 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-pvc
  namespace: monitoring
spec:
  storageClassName: ""
  volumeName: grafana-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi

riscv/grafana/grafana.yaml Executable file

@@ -0,0 +1,78 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      name: grafana
      labels:
        app: grafana
    spec:
      containers:
        - name: grafana
          image: grafana/grafana:latest
          ports:
            - name: grafana
              containerPort: 3000
          resources:
            limits:
              memory: "1Gi"
              cpu: "1000m"
            requests:
              memory: 500M
              cpu: "500m"
          volumeMounts:
            - mountPath: /var/lib/grafana
              name: grafana-storage
              subPath: grafana/storage
            - mountPath: /etc/grafana/provisioning/datasources
              name: grafana-datasources
              readOnly: false
      volumes:
        - name: grafana-storage
          persistentVolumeClaim:
            claimName: grafana-pvc
        - name: grafana-datasources
          configMap:
            defaultMode: 420
            name: grafana-datasources
---
apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: monitoring
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: '3000'
spec:
  selector:
    app: grafana
  type: NodePort
  ports:
    - port: 3000
      targetPort: 3000
      nodePort: 32000
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: grafana-tls
  namespace: monitoring
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`grafana.alldcs.nl`)
      kind: Rule
      services:
        - name: grafana
          port: 3000
  tls:
    certResolver: letsencrypt


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-jenkins
  title: Jenkins (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml


@@ -0,0 +1,140 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
  namespace: jenkins
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jenkins
  template:
    metadata:
      labels:
        app: jenkins
    spec:
      containers:
        - name: jenkins
          image: allardkrings/riscv64-jenkins:1.6
          securityContext:
            privileged: true
          ports:
            - name: http-port
              containerPort: 8080
            - name: jnlp-port
              containerPort: 50000
          volumeMounts:
            - name: jenkins-home
              mountPath: /var/jenkins_home
            - name: docker-sock
              mountPath: "/var/run/docker.sock"
              readOnly: false
            - name: docker-directory
              mountPath: "/var/lib/docker"
              readOnly: false
            - name: docker-bin
              mountPath: "/usr/bin/docker"
              readOnly: false
      initContainers:
        - name: change-ownership-container
          image: riscv64/busybox
          command: ["/bin/chown", "-R", "1000:1000", "/var/run/docker.sock"]
          securityContext:
            runAsUser: 0
            privileged: true
          volumeMounts:
            - name: docker-sock
              mountPath: /var/run/docker.sock
      volumes:
        - name: jenkins-home
          persistentVolumeClaim:
            claimName: jenkins-pvc
        - name: docker-sock
          hostPath:
            path: "/var/run/docker.sock"
        - name: docker-directory
          hostPath:
            path: "/var/lib/docker"
        - name: docker-bin
          hostPath:
            path: "/usr/local/bin/docker"
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  namespace: jenkins
spec:
  type: NodePort
  ports:
    - port: 8080
      targetPort: 8080
      nodePort: 30000
  selector:
    app: jenkins
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins-jnlp
  namespace: jenkins
spec:
  type: ClusterIP
  ports:
    - port: 50000
      targetPort: 50000
  selector:
    app: jenkins
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: jenkins-tls
  namespace: jenkins
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`jenkins-riscv.alldcs.nl`)
      kind: Rule
      services:
        - name: jenkins
          port: 8080
  tls:
    certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-pv
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/jenkins
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-pvc
  namespace: jenkins
spec:
  storageClassName: ""
  volumeName: jenkins-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi

riscv/jenkins/jenkins.yaml Normal file

@@ -0,0 +1,106 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
  namespace: jenkins
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jenkins
  template:
    metadata:
      labels:
        app: jenkins
    spec:
      containers:
        - name: jenkins
          image: allardkrings/riscv64-jenkins:1.6
          securityContext:
            privileged: true
          ports:
            - name: http-port
              containerPort: 8080
            - name: jnlp-port
              containerPort: 50000
          # env:
          #   - name: MASTER_GLOBAL_JAVA_OPTIONS
          #     value: "org.csanchez.jenkins.plugins.kubernetes.pipeline.PodTemplateStepExecution.defaultImage=allardkrings/riscv64-jenkins-agent:1.0"
          volumeMounts:
            - name: jenkins-home
              mountPath: /var/jenkins_home
      serviceAccountName: "jenkins-service-account"
      volumes:
        - name: jenkins-home
          persistentVolumeClaim:
            claimName: jenkins-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  namespace: jenkins
spec:
  ports:
    - port: 8080
      name: primary
      targetPort: 8080
    - port: 50000
      name: agent
      targetPort: 50000
  selector:
    app: jenkins
  type: ClusterIP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: jenkins-tls
  namespace: jenkins
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`jenkins-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: jenkins
          port: 8080
  tls:
    certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-pv
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/jenkins
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-pvc
  namespace: jenkins
spec:
  storageClassName: ""
  volumeName: jenkins-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi


@@ -0,0 +1,33 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins-service-account
  namespace: jenkins
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: jenkins-schedule-agents
  namespace: jenkins
rules:
  - apiGroups: [""]
    resources: ["pods", "pods/exec", "pods/log", "persistentvolumeclaims", "events"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods", "pods/exec", "persistentvolumeclaims"]
    verbs: ["create", "delete", "deletecollection", "patch", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: jenkins-schedule-agents
  namespace: jenkins
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: jenkins-schedule-agents
subjects:
  - kind: ServiceAccount
    name: jenkins-service-account
    namespace: jenkins


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-joomla
  title: Joomla (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml


@@ -0,0 +1,46 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: joomla-http
  namespace: joomla
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`joomla-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: joomla
          port: 80
      middlewares:
        - name: redirect-to-https
          namespace: joomla
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: joomla-tls
  namespace: joomla
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`joomla-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: joomla
          port: 80
  tls:
    certResolver: letsencrypt
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: redirect-to-https
  namespace: joomla
spec:
  redirectScheme:
    scheme: https
    permanent: true

riscv/joomla/riscv/joomla.yaml Executable file

@@ -0,0 +1,175 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: joomla
  namespace: joomla
  labels:
    app: joomla
spec:
  replicas: 1
  selector:
    matchLabels:
      app: joomla
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: joomla
    spec:
      containers:
        - image: riscv64/joomla:5.2.3-php8.1-fpm-alpine
          name: joomla
          imagePullPolicy: Always
          env:
            - name: JOOMLA_DB_HOST
              value: "mariadb.mariadb"
            - name: JOOMLA_DB_USER
              value: "joomla"
            - name: JOOMLA_DB_PASSWORD
              value: "joomla"
            # - name: JOOMLA_DB_PASSWORD_FILE
            #   value: "1000"
            - name: JOOMLA_DB_NAME
              value: "joomla"
            - name: JOOMLA_DB_TYPE
              value: "mysqli"
            - name: JOOMLA_SITE_NAME
              value: "allarddcs"
            - name: JOOMLA_ADMIN_USER
              value: "admin"
            - name: JOOMLA_ADMIN_USERNAME
              value: "admin"
            - name: JOOMLA_ADMIN_PASSWORD
              value: "JoomlaJoomla01"
            - name: JOOMLA_ADMIN_EMAIL
              value: "admin@allarddcs.nl"
            # - name: JOOMLA_EXTENSIONS_URLS
            #   value: "1000"
            # - name: JOOMLA_EXTENSIONS_PATHS
            #   value: "1000"
            # - name: JOOMLA_SMTP_HOST
            #   value: "1000"
            # - name: JOOMLA_SMTP_HOST_PORT
          ports:
            - containerPort: 9000
              name: php-fpm
              protocol: TCP
          volumeMounts:
            - name: nfs-joomla
              mountPath: /var/www/html
              subPath: html
        - name: nginx
          image: riscv64/nginx:1.27.4-alpine
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nfs-joomla
              mountPath: /var/www/html
              subPath: html
            - name: nfs-joomla
              mountPath: /etc/nginx/conf.d/default.conf
              subPath: default.conf
      volumes:
        - name: nfs-joomla # < linkname of the volume for the pvc
          persistentVolumeClaim:
            claimName: joomla-pvc # < pvc name we created in the previous yaml
---
apiVersion: v1
kind: Service
metadata:
  name: joomla
  namespace: joomla
spec:
  selector:
    app: joomla
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: joomla-http
  namespace: joomla
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`joomla-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: joomla
          port: 80
      middlewares:
        - name: redirect-to-https
          namespace: joomla
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: joomla-tls
  namespace: joomla
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`joomla-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: joomla
          port: 80
  tls:
    certResolver: letsencrypt
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: redirect-to-https
  namespace: joomla
spec:
  redirectScheme:
    scheme: https
    permanent: true
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: joomla-pv
spec:
  storageClassName: ""
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/joomla/riscv
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: joomla-pvc
  namespace: joomla
spec:
  storageClassName: ""
  volumeName: joomla-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 2Gi


@@ -0,0 +1,158 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: joomla
  namespace: joomla
spec:
  replicas: 1
  selector:
    matchLabels:
      app: joomla
  template:
    metadata:
      labels:
        app: joomla
    spec:
      containers:
        - name: php-fpm
          image: riscv64/joomla:5.2.3-php8.1-fpm-alpine
          ports:
            - containerPort: 9000 # PHP-FPM listens on this port
          volumeMounts:
            - name: joomla-data
              mountPath: /var/www/html
        - name: nginx
          image: nginx:latest
          ports:
            - containerPort: 80 # Nginx serves HTTP
          volumeMounts:
            - name: nfs-joomla
              mountPath: /var/www/html
              subPath: html
            - name: nfs-joomla
              mountPath: /etc/nginx/conf.d/default.conf
              subPath: default.conf
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: joomla
  namespace: joomla
  labels:
    app: joomla
spec:
  replicas: 1
  selector:
    matchLabels:
      app: joomla
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: joomla
    spec:
      containers:
        - image: riscv64/joomla:5.2.3-php8.1-fpm-alpine
          name: joomla
          imagePullPolicy: Always
          env:
            - name: JOOMLA_DB_HOST
              value: "mariadb.mariadb"
            - name: JOOMLA_DB_USER
              value: "joomla"
            - name: JOOMLA_DB_PASSWORD
              value: "joomla"
            # - name: JOOMLA_DB_PASSWORD_FILE
            #   value: "1000"
            - name: JOOMLA_DB_NAME
              value: "joomla"
            - name: JOOMLA_DB_TYPE
              value: "mysqli"
            - name: JOOMLA_SITE_NAME
              value: "allarddcs"
            - name: JOOMLA_ADMIN_USER
              value: "admon"
            - name: JOOMLA_ADMIN_USERNAME
              value: "Admin"
            - name: JOOMLA_ADMIN_PASSWORD
              value: "JoomlaJoomla01"
            - name: JOOMLA_ADMIN_EMAIL
              value: "admin@allarddcs.nl"
            # - name: JOOMLA_EXTENSIONS_URLS
            #   value: "1000"
            # - name: JOOMLA_EXTENSIONS_PATHS
            #   value: "1000"
            # - name: JOOMLA_SMTP_HOST
            #   value: "1000"
            # - name: JOOMLA_SMTP_HOST_PORT
          ports:
            - containerPort: 9000
              name: php-fpm
              protocol: TCP
        - name: nginx
          image: riscv64/nginx:1.27.4-alpine
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nfs-joomla
              mountPath: /var/www/html
              subPath: html
            - name: nfs-joomla
              mountPath: /etc/nginx/conf.d/default.conf
              subPath: default.conf
      volumes:
        - name: nfs-joomla
          persistentVolumeClaim:
            claimName: joomla-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: joomla
  namespace: joomla
spec:
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: joomla-pv
spec:
  storageClassName: ""
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/joomla/riscv
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: joomla-pvc
  namespace: joomla
spec:
  storageClassName: ""
  volumeName: joomla-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 2Gi


@@ -0,0 +1,24 @@
#install:
INSTALL_K3S_SKIP_DOWNLOAD="true" bash -x k3s-install.sh
#create kubeconfig:
export KUBECONFIG=~/.kube/config
mkdir ~/.kube 2> /dev/null
sudo k3s kubectl config view --raw > "$KUBECONFIG"
chmod 600 "$KUBECONFIG"
#add a node:
- first fetch the token from the server and keep it:
export mynodetoken=$(sudo cat /var/lib/rancher/k3s/server/node-token)
- then install k3s as an agent on the other machine:
INSTALL_K3S_SKIP_DOWNLOAD="true" K3S_URL=https://myserver:6443 K3S_TOKEN=$mynodetoken bash -x k3s-install.sh
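#check on the server that the agent joined:
kubectl get nodes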


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-kubernetes
  title: Kubernetes (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml


@@ -0,0 +1 @@
microk8s kubectl -n kube-system create token admin-user --duration=8544h

riscv/kubernetes/install.sh Executable file

@@ -0,0 +1,8 @@
sudo /usr/local/bin/k3s-uninstall.sh
sudo cp k3s /usr/local/bin/
INSTALL_K3S_SKIP_DOWNLOAD="true" bash -x k3s-install.sh
export KUBECONFIG=~/.kube/config
mkdir ~/.kube 2> /dev/null
sudo k3s kubectl config view --raw > "$KUBECONFIG"
chmod 600 "$KUBECONFIG"
kubectl get pod -A


@@ -0,0 +1,251 @@
#!/bin/sh
# Copyright 2019 The Kubernetes Authors.
# Copyright 2020 Rancher Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is only meant for use when operating in a non-containerized
# environment but using non-host binaries (i.e. K3s with k3s-root), but
# will fall back to operating in a containerized environment if necessary.
# It relies on the underlying host system not having cgroups set up for PID
# 1, as this is how it detects whether it is operating in a containerized
# environment or not.
# Four step process to inspect for which version of iptables we're operating
# with.
# 1. Detect whether we are operating in a containerized environment by inspecting cgroups for PID 1.
# 2. Run iptables-nft-save and iptables-legacy-save to inspect for rules. If
# no rules are found from either binaries, then
# 3. Check /etc/alternatives/iptables on the host to see if there is a symlink
# pointing towards the iptables binary we are using, if there is, run the
# binary and grep it's output for version higher than 1.8 and "legacy" to see
# if we are operating in legacy
# 4. Last chance is to do a rough check of the operating system, to make an
# educated guess at which mode we can operate in.
# Bugs in iptables-nft 1.8.3 may cause it to get stuck in a loop in
# some circumstances, so we have to run the nft check in a timeout. To
# avoid hitting that timeout, we only bother to even check nft if
# legacy iptables was empty / mostly empty.
mode=unknown
detected_via=unknown
containerized=false
# Check to see if the nf_tables kernel module is loaded, if it is, we should operate in nft mode, else just fall back to legacy. This should only be run when in a container, ideally the klipper-lb container.
nft_module_check() {
lsmod | grep "nf_tables" 2> /dev/null
if [ $? = 0 ]; then
detected_via=modules
mode=nft
else
detected_via=modules
mode=legacy
fi
}
# Check to see if we are containerized -- essentially look at the cgroup for PID 1 and check for things at the end of the "/" which indicates we are in a container (PID 1 shouldn't necessarily have a cgroup)
# there are two cases when we are containerized -- k3d and things that aren't k3s
is_containerized() {
CGT=$(cat /proc/1/cgroup | grep "cpuset" | awk -F: '{print $3}' | sed 's/\///g');
if [ -z $CGT ]; then
containerized=false
else
containerized=true
fi
}
rule_check() {
num_legacy_lines=$( (
iptables-legacy-save || true
ip6tables-legacy-save || true
) 2>/dev/null | grep '^-' | wc -l)
if [ "${num_legacy_lines}" -ge 10 ]; then
detected_via=rules
mode=legacy
else
num_nft_lines=$( (timeout 5 sh -c "iptables-nft-save; ip6tables-nft-save" || true) 2>/dev/null | grep '^-' | wc -l)
if [ "${num_legacy_lines}" -gt "${num_nft_lines}" ]; then
detected_via=rules
mode=legacy
elif [ "${num_nft_lines}" = 0 ]; then
mode=unknown
else
detected_via=rules
mode=nft
fi
fi
}
alternatives_check() {
readlink /etc/alternatives/iptables >/dev/null
if [ $? = 0 ]; then
readlink /etc/alternatives/iptables | grep -q "nft"
if [ $? = 0 ]; then
detected_via=alternatives
mode=nft
else
detected_via=alternatives
mode=legacy
fi
fi
}
# we should not run os-detect if we're being run inside of a container
os_detect() {
# perform some very rudimentary platform detection
lsb_dist=''
dist_version=''
if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
fi
if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
lsb_dist='debian'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
lsb_dist='fedora'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
lsb_dist='oracleserver'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
lsb_dist='centos'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
lsb_dist='redhat'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
lsb_dist="$(. /etc/os-release && echo "$ID")"
fi
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
# Special case redhatenterpriseserver
if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then
# Set it to redhat, it will be changed to centos below anyways
lsb_dist='redhat'
fi
case "$lsb_dist" in
alpine)
# Alpine is using iptables-legacy by default when you apk add iptables. There exists a iptables-nft subset of commands but they are not
# used by default.
detected_via=os
mode=legacy
;;
ubuntu)
# By default, Ubuntu is using iptables in legacy mode. Ideally, this should have been already caught by the alternatives check.
detected_via=os
mode=legacy
;;
debian | raspbian)
dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
# If Debian >= 10 (Buster is 10), then NFT. otherwise, assume it is legacy
if [ "$dist_version" -ge 10 ]; then
detected_via=os
mode=nft
else
detected_via=os
mode=legacy
fi
;;
oracleserver)
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
maj_ver=$(echo $dist_version | sed -E -e "s/^([0-9]+)\.?[0-9]*$/\1/")
if [ "$maj_ver" -ge 8 ]; then
detected_via=os
mode=nft
else
detected_via=os
mode=legacy
fi
;;
fedora)
# As of 05/15/2020, all Fedora packages appeared to be still `legacy` by default although there is a `iptables-nft` package that installs the nft iptables, so look for that package.
rpm -qa | grep -q "iptables-nft"
if [ $? = 0 ]; then
detected_via=os
mode=nft
else
detected_via=os
mode=legacy
fi
;;
centos | redhat)
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
maj_ver=$(echo $dist_version | sed -E -e "s/^([0-9]+)\.?[0-9]*$/\1/")
if [ "$maj_ver" -ge 8 ]; then
detected_via=os
mode=nft
else
detected_via=os
mode=legacy
fi
;;
# We are running an operating system we don't know, default to nf_tables.
*)
detected_via=unknownos
mode=nft
;;
esac
}
if [ ! -z "$IPTABLES_MODE" ]; then
mode=${IPTABLES_MODE}
else
rule_check
if [ "${mode}" = "unknown" ]; then
is_containerized
# If we're containerized, then just fall back to legacy, in hopes `ip_tables` is loaded.
if [ "${containerized}" = "true" ]; then
mode=legacy
else
alternatives_check
if [ "${mode}" = "unknown" ]; then
os_detect
fi
fi
fi
fi
if [ "${mode}" = "unknown" ]; then
exit 1
fi
if [ "$(basename $0)" = "iptables-detect.sh" ]; then
echo mode is $mode detected via $detected_via and containerized is $containerized
exit 0
fi
xtables-set-mode.sh -m ${mode} >/dev/null
if [ $? = 0 ]; then
exec "$0" "$@"
else
exit 1
fi

File diff suppressed because it is too large


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-mariadb
  title: Mariadb (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml

riscv/mariadb/create-secret.sh Executable file

@@ -0,0 +1 @@
microk8s kubectl create secret generic mariadb-secret --from-file=username=./username.txt --from-file=password=./password.txt

riscv/mariadb/login.sh Executable file

@@ -0,0 +1 @@
microk8s kubectl exec -it mariadb-sts-0 -- mariadb -uroot -psecret -n databases


@@ -0,0 +1,86 @@
apiVersion: v1
kind: Namespace
metadata:
  name: mariadb
---
apiVersion: v1
kind: Service
metadata:
  name: mariadb
  namespace: mariadb
  labels:
    app: mariadb
spec:
  ports:
    - port: 3306
      name: mariadb-port
  clusterIP: None
  selector:
    app: mariadb
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mariadb-sts
  namespace: mariadb
spec:
  serviceName: "mariadb"
  replicas: 1
  selector:
    matchLabels:
      app: mariadb
  template:
    metadata:
      labels:
        app: mariadb
    spec:
      containers:
        - name: mariadb
          image: allardkrings/riscv64-mariadb:10.5
          ports:
            - containerPort: 3306
              name: mariadb-port
          env:
            - name: MARIADB_ROOT_PASSWORD
              value: "password"
            - name: PMA_HOST
              value: 'mariadb'
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/mysql/
      volumes:
        - name: datadir
          persistentVolumeClaim:
            claimName: mariadb-pvc
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mariadb-pv
  labels:
    type: local
spec:
  storageClassName: ""
  capacity:
    storage: 4Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/mariadb/riscv
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mariadb-pvc
  namespace: mariadb
spec:
  storageClassName: ""
  volumeName: mariadb-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 4Gi

riscv/mariadb/password.txt Executable file

@@ -0,0 +1 @@
secret

riscv/mariadb/username.txt Executable file

@@ -0,0 +1 @@
root


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-monica
  title: Monica (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml


@@ -0,0 +1,2 @@
user: admin
password: Monica0101@


@@ -0,0 +1,175 @@
apiVersion: v1
kind: Namespace
metadata:
  name: monica
---
# Secret for database credentials
apiVersion: v1
kind: Secret
metadata:
  name: monica-db-secret
  namespace: monica
type: Opaque
stringData:
  DB_USERNAME: monica
  DB_PASSWORD: monica
---
# Secret for Monica APP_KEY (you can generate a new one with "php artisan key:generate --show")
apiVersion: v1
kind: Secret
metadata:
  name: monica-app-secret
  namespace: monica
type: Opaque
stringData:
  APP_KEY: base64:6McA2wuosOQlpO12vIRl7LPFbNlkxzOqzA8ZPSj7Huk=
---
# Persistent Volume Claim for Monica's storage
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: monica-pvc
  namespace: monica
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: monica-pv
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/monica/riscv
    readOnly: false
---
# Monica Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: monica
  namespace: monica
spec:
  replicas: 1
  selector:
    matchLabels:
      app: monica
  template:
    metadata:
      labels:
        app: monica
    spec:
      containers:
        - name: monica
          image: riscv64/monica:latest
          ports:
            - containerPort: 80
          env:
            - name: APP_ENV
              value: production
            - name: APP_KEY
              valueFrom:
                secretKeyRef:
                  name: monica-app-secret
                  key: APP_KEY
            - name: DB_CONNECTION
              value: mysql
            - name: DB_HOST
              value: mariadb.mariadb.svc.cluster.local
            - name: DB_DATABASE
              value: monica
            - name: DB_USERNAME
              valueFrom:
                secretKeyRef:
                  name: monica-db-secret
                  key: DB_USERNAME
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: monica-db-secret
                  key: DB_PASSWORD
            - name: DB_PORT
              value: "3306"
            - name: APP_URL
              value: https://monica-riscv.allarddcs
          volumeMounts:
            - name: monica-data
              mountPath: /var/www/html/storage
      volumes:
        - name: monica-data
          persistentVolumeClaim:
            claimName: monica-pvc
---
# Service for Monica
apiVersion: v1
kind: Service
metadata:
  name: monica
  namespace: monica
spec:
  type: ClusterIP
  selector:
    app: monica
  ports:
    - name: http
      port: 80
      targetPort: 80
---
# Middleware for HTTP -> HTTPS redirect
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: redirect-to-https
  namespace: monica
spec:
  redirectScheme:
    scheme: https
    permanent: true
---
# HTTP IngressRoute
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: monica
  namespace: monica
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`monica-riscv.allarddcs.nl`)
      kind: Rule
      middlewares:
        - name: redirect-to-https
      services:
        - name: monica
          port: 80
---
# HTTPS IngressRoute (TLS via Let's Encrypt)
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: monica-tls
  namespace: monica
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`monica-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: monica
          port: 80
  tls:
    certResolver: letsencrypt

riscv/nextcloud/README.md Normal file

@@ -0,0 +1,171 @@
INSIDE THE NEXTCLOUD CONTAINER:
===============================
#check whether WebDAV is installed:
su -s /bin/sh -c "php occ app:list" www-data
=>
Enabled:
- activity: 2.20.0
- circles: 28.0.0
- cloud_federation_api: 1.11.0
- comments: 1.18.0
- contactsinteraction: 1.9.0
- dashboard: 7.8.0
- dav: 1.29.2
- federatedfilesharing: 1.18.0
- federation: 1.18.0
- files: 2.0.0
- files_pdfviewer: 2.9.0
- files_reminders: 1.1.0
- files_sharing: 1.20.0
- files_trashbin: 1.18.0
- files_versions: 1.21.0
- firstrunwizard: 2.17.0
- logreader: 2.13.0
- lookup_server_connector: 1.16.0
- nextcloud_announcements: 1.17.0
- notifications: 2.16.0
- oauth2: 1.16.4
- password_policy: 1.18.0
- photos: 2.4.0
- privacy: 1.12.0
- provisioning_api: 1.18.0
- recommendations: 2.0.0
- related_resources: 1.3.0
- serverinfo: 1.18.0
- settings: 1.10.1
- sharebymail: 1.18.0
- support: 1.11.1
- survey_client: 1.16.0
- systemtags: 1.18.0
- text: 3.9.2
- theming: 2.3.0
- twofactor_backupcodes: 1.17.0
- updatenotification: 1.18.0
- user_status: 1.8.1
- viewer: 2.2.0
- weather_status: 1.8.0
- workflowengine: 2.10.0
Disabled:
- admin_audit: 1.18.0
- bruteforcesettings: 2.8.0
- encryption: 2.16.0
- files_external: 1.20.0
- suspicious_login: 6.0.0
- twofactor_totp: 10.0.0-beta.2
- user_ldap: 1.19.0
#if not, then:
su -s /bin/sh -c "php occ app:enable dav" www-data
=> dav already enabled
#check whether nextcloud can read all files in var/www/data:
su -s /bin/sh -c "php occ files:scan --all" www-data
=>
Starting scan for user 1 out of 1 (admin)
+---------+-------+-----+---------+---------+--------+--------------+
| Folders | Files | New | Updated | Removed | Errors | Elapsed time |
+---------+-------+-----+---------+---------+--------+--------------+
| 6 | 44 | 0 | 0 | 0 | 0 | 00:00:01 |
+---------+-------+-----+---------+---------+--------+--------------+
#check whether the user "admin" has a directory:
ls -lah /var/www/html/data/admin/
=>
total 16K
drwxrwx--- 4 www-data www-data 4.0K Feb 9 16:47 .
drwxrwx--- 4 www-data www-data 4.0K Feb 9 16:46 ..
drwxrwx--- 2 www-data www-data 4.0K Feb 9 16:47 cache
drwxrwx--- 5 www-data www-data 4.0K Feb 9 16:17 files
#check whether the user exists:
su -s /bin/sh -c "php occ user:list" www-data
=> - admin: admin
#enable the admin user:
su -s /bin/sh -c "php occ user:enable admin" www-data
=> The specified user is enabled
su -s /bin/sh -c "php occ security:bruteforce:reset admin" www-data
=>
#check the log for specific messages concerning admin:
cat /var/www/html/data/nextcloud.log | grep "admin"
=>
{"reqId":"up3RdhwXxxGTV3nlIMH9","level":2,
"time":"2025-02-09T20:15:15+00:00",
"remoteAddr":"10.42.0.82",
"user":"--",
"app":"no app in context",
"method":"POST",
"url":"/index.php/login",
"message":"Login failed: admin (Remote IP: 10.42.0.82)",
"userAgent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.3 Safari/605.1.15",
"version":"28.0.14.1",
"data":[]}
#check whether .htaccess is configured correctly:
cat /var/www/html/.htaccess | grep dav
=>
RewriteRule ^$ /remote.php/webdav/ [L,R=302]
RewriteRule ^\.well-known/carddav /remote.php/dav/ [R=301,L]
RewriteRule ^\.well-known/caldav /remote.php/dav/ [R=301,L]
# - https://docs.nextcloud.com/server/latest/admin_manual/issues/general_troubleshooting.html#troubleshooting-webdav
#clear the cache:
su -s /bin/sh -c "php occ maintenance:mode --on" www-data
=> Nextcloud is in maintenance mode, no apps are loaded.
su -s /bin/sh -c "php occ maintenance:data-fingerprint" www-data
=>
Commands provided by apps are unavailable.
Updated data-fingerprint to 60f73f9b70daee107c27b5a064670c28
su -s /bin/sh -c "php occ cache:clear" www-data
=>
Commands provided by apps are unavailable.
Updated data-fingerprint to 60f73f9b70daee107c27b5a064670c28
There are no commands defined in the "cache" namespace.
su -s /bin/sh -c "php occ maintenance:mode --off" www-data
=>
Maintenance mode disabled
su -s /bin/sh -c "php occ config:system:get overwrite.cli.url" www-data
OUTSIDE THE NEXTCLOUD CONTAINER:
================================
#restart:
kubectl rollout restart deployment nextcloud -n nextcloud
#test WebDAV:
curl -v -u admin:Nextcloud01@ -X PROPFIND https://nextcloud-riscv.allarddcs.nl/remote.php/dav/files/admin/


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-nextcloud
  title: Nextcloud (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml


@@ -0,0 +1,90 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: nextcloud-http
  namespace: nextcloud
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`nextcloud-riscv.allarddcs.nl`)
      kind: Rule
      services:
        - name: nginx
          port: 80
      middlewares:
        - name: redirect-to-https
          namespace: nextcloud
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: nextcloud-tls
  namespace: nextcloud
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`nextcloud-riscv.allarddcs.nl`)
      kind: Rule
      middlewares:
        - name: nextcloud-well-known
        - name: nextcloud-headers
      services:
        - name: nginx
          port: 80
    - match: Host(`nextcloud-riscv.allarddcs.nl`) && PathPrefix(`/ocs/`)
      kind: Rule
      middlewares:
        - name: nextcloud-well-known
        - name: nextcloud-headers
      services:
        - name: nginx
          port: 80
    - match: Host(`nextcloud-riscv.allarddcs.nl`) && PathPrefix(`/ocs-provider/`)
      kind: Rule
      middlewares:
        - name: nextcloud-headers
      services:
        - name: nginx
          port: 80
  tls:
    certResolver: letsencrypt
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: redirect-to-https
  namespace: nextcloud
spec:
  redirectScheme:
    scheme: https
    permanent: true
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: nextcloud-headers
  namespace: nextcloud
spec:
  headers:
    stsSeconds: 15552000
    browserXssFilter: true
    contentTypeNosniff: true
    forceSTSHeader: true
    frameDeny: true
    sslRedirect: true
    stsIncludeSubdomains: true
    stsPreload: true
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: nextcloud-well-known
  namespace: nextcloud
spec:
  redirectRegex:
    regex: "^/.well-known/(carddav|caldav|webdav)"
    replacement: "/remote.php/dav"
    permanent: true

riscv/nextcloud/nextcloud.yaml Executable file

@@ -0,0 +1,130 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nextcloud # < name of the deployment and reference
  namespace: nextcloud
  labels:
    app: nextcloud # < label for tagging and reference
spec:
  replicas: 1 # < number of pods to deploy
  selector:
    matchLabels:
      app: nextcloud
  strategy:
    rollingUpdate:
      maxSurge: 0 # < The number of pods that can be created above the desired amount of pods during an update
      maxUnavailable: 1 # < The number of pods that can be unavailable during the update process
    type: RollingUpdate # < New pods are added gradually, and old pods are terminated gradually
  template:
    metadata:
      labels:
        app: nextcloud
    spec:
      containers:
        - image: riscv64/nextcloud:28.0.14-fpm-alpine # < the name of the docker image we will use
          name: nextcloud # < name of container
          imagePullPolicy: Always # < always use the latest image when creating container/pod
          env: # < environment variables. See https://hub.docker.com/r/linuxserver/nextcloud
            - name: PGID
              value: "1000" # < group "ubuntu"
            - name: PUID
              value: "1000" # < user "ubuntu"
            - name: MYSQL_HOST
              value: mariadb.mariadb.svc.cluster.local
            - name: MYSQL_DATABASE
              value: "nextcloud"
            - name: MYSQL_USER
              value: "nextcloud"
            - name: MYSQL_PASSWORD
              value: "nextcloud"
            - name: MYSQL_ROOT_PASSWORD
              value: "password"
            - name: NEXTCLOUD_HOSTNAME
              value: "nextcloud-riscv.allarddcs.nl"
            - name: TZ
              value: Europe/Amsterdam
            - name: OVERWRITEPROTOCOL
              value: "https"
            - name: APACHE_SERVER_NAME
              value: "nextcloud-riscv.allarddcs.nl"
          ports:
            - containerPort: 9000 # < required network portnumber. See https://hub.docker.com/r/linuxserver/nextcloud
              name: http
              protocol: TCP
          volumeMounts: # < the volume mount in the container. Look at the relation volumelabel->pvc->pv
            # - name: nfs-nextcloud
            #   mountPath: /var/www/html
            #   subPath: html
            - name: nfs-nextcloud
              mountPath: /var/www/html/data
              subPath: data
            - name: nfs-nextcloud
              mountPath: /var/www/html/config
              subPath: config
            # - name: nfs-nextcloud
            #   mountPath: /var/www/html/custom_apps
            #   subPath: nextapps
        - name: nginx
          image: riscv64/nginx:1.27.4-alpine
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nfs-nextcloud
              mountPath: /var/www/html
              subPath: html
            - name: nfs-nextcloud
              mountPath: /etc/nginx/conf.d/default.conf
              subPath: default.conf
      volumes:
        - name: nfs-nextcloud # < linkname of the volume for the pvc
          persistentVolumeClaim:
            claimName: nextcloud-pvc # < pvc name we created in the previous yaml
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: nextcloud
spec:
  selector:
    app: nextcloud
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nextcloud-pv
spec:
  storageClassName: ""
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    server: 192.168.2.110
    path: /mnt/nfs_share/nextcloud-riscv
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nextcloud-pvc
  namespace: nextcloud
spec:
  storageClassName: ""
  volumeName: nextcloud-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 10Gi

riscv/nextcloud/restart.sh Executable file

@@ -0,0 +1,4 @@
kubectl delete -f nextcloud.yaml
kubectl delete -f nginx-config.yaml
kubectl apply -f nginx-config.yaml
kubectl apply -f nextcloud.yaml

riscv/nextcloud/test.txt Normal file

File diff suppressed because it is too large


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: riscv-nexus
  title: Nexus (riscv)
spec:
  type: service
  lifecycle: production
  owner: platform-team
  partOf:
    - ../catalog-info.yaml


@@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID1TCCAr2gAwIBAgIIVTEAVNvb6f8wDQYJKoZIhvcNAQELBQAweDELMAkGA1UE
BhMCVVMxFDASBgNVBAgTC1Vuc3BlY2lmaWVkMRQwEgYDVQQHEwtVbnNwZWNpZmll
ZDERMA8GA1UEChMIU29uYXR5cGUxETAPBgNVBAsTCFNvbmF0eXBlMRcwFQYDVQQD
DA4qLmFsbGFyZGRjcy5ubDAeFw0yNTEwMjYwOTExMzRaFw0zOTA3MDUwOTExMzRa
MHgxCzAJBgNVBAYTAlVTMRQwEgYDVQQIEwtVbnNwZWNpZmllZDEUMBIGA1UEBxML
VW5zcGVjaWZpZWQxETAPBgNVBAoTCFNvbmF0eXBlMREwDwYDVQQLEwhTb25hdHlw
ZTEXMBUGA1UEAwwOKi5hbGxhcmRkY3MubmwwggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQCkVO2YgFo4sTybZaG4YHCsDHTL2WyAYggnW+yUDei5pnrFTYFk
F7k87xSxs0WeJtf0kiZhjj8dBMqfvSNf0VNKEIw1EBUXcn/R/ymE5aAraQOAsBhu
HPWCnbTZdplUwDR64B9+pn8uZ/qSkfJZ6pCsGcTa/hvl1inWMZJQgiKnkn17WMP9
CSt8BOwy9HpadfSdLt1wkfyNQs5vHsPFwadCfXIgwxhN7NnN4Z9iPU1asfZa6Y2d
ndIocLNIB4YfMfZ15TX/dPGqiJ9qdcsdGMgcqFIC4e+N1reHNubnGKh/CdvP6LGC
IzorF83F81CoMjTKNGTZQ6WBM7qP36Y2NFuxAgMBAAGjYzBhMB0GA1UdDgQWBBQn
IWPFZINR46eiDT4GB4qB+hI/mTBABgNVHREEOTA3ghhuZXh1cy1yaXNjdi5hbGxh
cmRkY3MubmyCG3JlZ2lzdHJ5LXJpc2N2LmFsbGFyZGRjcy5ubDANBgkqhkiG9w0B
AQsFAAOCAQEAD+wcG658hrsu7M5rrKDK7U1qYJMliu6nnU/vl84YRwPHmWgcbrS3
5Q2EudR0PyS1/YsNJH5HAANmu6K6My7/f+l6DBeiONs0FCZAqobgpHy5V8PCIOTt
tIP/lGoZe+USojc8VydTYzdG70AASF5R5No4w0vozDFuQptaVI0AmOH/7WMYjNlW
PSZGKZt/m/9Fd//kDjUvwLvYnGYKTx49GU3ZHyiDLxFdPl9lyLMq9M0jdx2BzLVz
HTtBJ6Rz5WtL1e7cUQclYugNlJoqRrnMNz/M8gVXzTqiqg4AEQYQHZHK1FFhuz55
qUDjUDlP2psKFiKLXzKKyP1ugF/5Pm0/lw==
-----END CERTIFICATE-----


@@ -0,0 +1,32 @@
Bag Attributes
friendlyName: allarddcs.nl
localKeyID: 54 69 6D 65 20 31 37 36 31 34 36 39 39 37 35 39 30 37
Key Attributes: <No Attributes>
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCkVO2YgFo4sTyb
ZaG4YHCsDHTL2WyAYggnW+yUDei5pnrFTYFkF7k87xSxs0WeJtf0kiZhjj8dBMqf
vSNf0VNKEIw1EBUXcn/R/ymE5aAraQOAsBhuHPWCnbTZdplUwDR64B9+pn8uZ/qS
kfJZ6pCsGcTa/hvl1inWMZJQgiKnkn17WMP9CSt8BOwy9HpadfSdLt1wkfyNQs5v
HsPFwadCfXIgwxhN7NnN4Z9iPU1asfZa6Y2dndIocLNIB4YfMfZ15TX/dPGqiJ9q
dcsdGMgcqFIC4e+N1reHNubnGKh/CdvP6LGCIzorF83F81CoMjTKNGTZQ6WBM7qP
36Y2NFuxAgMBAAECggEAAPNppiA+ZWWUVctyWObCwCJ/HbU8WeHQ7XYsTQ8BJOtz
RzQtM3vcdOwznabNfrf3nlENXjMi5yZ7JafdtIg5h/KTw4E8UTTQZVHco6NqGDyb
UJTnda7YaZ3cyTiIDcpIg3PlVFsSRIQSTX00S2Dk3wBz9/BqD08vhS9apJMClrjR
fbJQjuhDJzOdoIbs3+pwCTLJdBDip8QQ6b6WwbSnYBb8yoAofR/GBZUP4Q6EoFER
WrXRaiwAS97KtJO0QrJap7VnC8JBR79irzJ/6C1ZOSomP2NJu571AetLPlSZBNo/
LQcdOTOktV90eYwD6wVEqcHhl99seonH6beDYHO0oQKBgQDEwRbbXFc9gf48Iufh
N8GMGG6oUVhLqib4cGWjIrU6MnpSdE3C/AOkVZqhlhO6Aqwh311pFfhLazdfkqcc
wuaMJhxCNsG8rzOT1zUuiJlTe3Le19TR31qmQtLQ+tsENWrCxL939n5jf470zo0o
9/dgeUKA8hqps6ZgvZVW06C06wKBgQDV0Ig3vDm4VILjRjmPHcSktrF0i9Lbp3uD
OE7uwBItXuO+iLZJHM49vGgHtXsSgqA/B3Kso1Cv31Mh6CNbUVkVvwDht56FuP4R
7s638AE1502cQcc6gKbZlkfBpzeZlkTicdnuzPLw/PoZGaIyD5BgUNjx11nk4AKD
vzwR1sY60wKBgBhUaCcn/AG3GWEGT/YhluVkAAsARBLXL4p5G5hYqmBP7aBUkWkT
EMA5da2ViUrvGan2nO5psRJiZ66By/hagXfDHqtxafOTFqWpbwIaEhuooEO+HKr3
G5aDnN4KpxqWIGWFPsfuyyIym9LZ18rBHu3nELoxNerWNDSyPM1Hzg+RAoGBAMXQ
tYYjPZ+diK7utKgFGX5ujAVQq5eO/0Wq3dQjnW2egcQwxb0kymbxnamsLJ42fj1y
DZVNT4Q3cLlJBRUiUPI+kXlDIYWEXoOG1nf0s5oEUpiDfuhQSI28bMzsgRM2pKqA
POmjcgylcFmyjo4UOjXx9pTg8Yk/+vObBN9YPnQDAoGAO4u2kpFG9n9EkUOpbr8f
x7VLpublPyWaL8PxqPR/816eaQwibf++Kd8d5pc8ILhCLGljbUIysR7DcfFOKaTU
qkMS699oq/zRD+VzeS5DxQhCAEzBezYf1+SYwJ9kCw4kduS+jgLgzpZTjlnZt65W
FyIeu0701aGQIYPMHjetBAE=
-----END PRIVATE KEY-----

Binary file not shown.


@@ -0,0 +1,4 @@
kubectl create secret tls nexus-cert \
--cert=allarddcs.nl.cert \
--key=allarddcs.nl.key \
-n nexus
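
To check that the secret matches the generated certificate (names as above), the subject and SAN entries can be read back out of it:

kubectl get secret nexus-cert -n nexus -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -subject -ext subjectAltName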


@@ -0,0 +1,10 @@
# 1) Generate a keypair with a wildcard CN plus SAN entries for both hostnames
keytool -genkeypair -keystore keystore.jks -storepass password -alias allarddcs.nl \
-keyalg RSA -keysize 2048 -validity 5000 -keypass password \
-dname 'CN=*.allarddcs.nl, OU=Sonatype, O=Sonatype, L=Unspecified, ST=Unspecified, C=US' \
-ext 'SAN=DNS:nexus-riscv.allarddcs.nl,DNS:registry-riscv.allarddcs.nl'
# 2) Export the certificate in PEM form
keytool -exportcert -keystore keystore.jks -alias allarddcs.nl -rfc > allarddcs.nl.cert
# 3) Convert the keystore to PKCS12 and extract the unencrypted private key
keytool -importkeystore -srckeystore keystore.jks -destkeystore allarddcs.nl.p12 -deststoretype PKCS12
openssl pkcs12 -nocerts -nodes -in allarddcs.nl.p12 -out allarddcs.nl.key
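
A quick way to confirm the SAN entries survived the export, using only the files generated above:

keytool -list -v -keystore keystore.jks -storepass password -alias allarddcs.nl | grep -A2 SubjectAlternativeName
openssl x509 -in allarddcs.nl.cert -noout -dates -ext subjectAltName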

Binary file not shown.


@@ -0,0 +1,156 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nexus
namespace: nexus
labels:
app: nexus
spec:
replicas: 1
selector:
matchLabels:
app: nexus
template:
metadata:
labels:
app: nexus
spec:
containers:
- name: nexus
image: allardkrings/riscv64-nexus
resources:
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "4Gi"
cpu: "2000m"
ports:
- containerPort: 8081
name: web
- containerPort: 8443
name: websecure
- containerPort: 8444
name: docker
volumeMounts:
# Nexus work directory
- mountPath: /opt/sonatype/sonatype-work/nexus3
name: nexus-data
subPath: data-dir
# SSL keystore
- mountPath: /opt/sonatype/nexus/etc/ssl
name: nexus-data
subPath: ssl
env:
- name: INSTALL4J_ADD_VM_PARAMS
value: "-XX:ActiveProcessorCount=4 -Djava.util.prefs.userRoot=/opt/sonatype/sonatype-work/nexus3/javaprefs"
- name: NEXUS_SECURITY_SSL_KEYSTORE_PATH
value: /opt/sonatype/nexus/etc/ssl/allarddcs.nl.p12
- name: NEXUS_SECURITY_SSL_KEYSTORE_PASSWORD
value: "password"
volumes:
- name: nexus-data
persistentVolumeClaim:
claimName: nexus-pvc
---
apiVersion: v1
kind: Service
metadata:
name: nexus
namespace: nexus
spec:
ports:
- name: web
targetPort: 8081
port: 8081
- name: websecure
targetPort: 8443
port: 8443
- name: docker
targetPort: 8444
port: 8444
selector:
app: nexus
type: ClusterIP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: nexus-http
namespace: nexus
spec:
entryPoints:
- web
routes:
- match: Host(`nexus-riscv.allarddcs.nl`)
kind: Rule
services:
- name: nexus
port: 8081
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: nexus-tls
namespace: nexus
spec:
entryPoints:
- websecure
routes:
- match: HostSNI(`nexus-riscv.allarddcs.nl`)
services:
- name: nexus
port: 8443
tls:
passthrough: true
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: registry-tcp-tls
namespace: nexus
spec:
entryPoints:
- docker
routes:
- match: HostSNI(`registry-riscv.allarddcs.nl`)
services:
- name: nexus
port: 8444
tls:
passthrough: true
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nexus-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/nexus/riscv
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nexus-pvc
namespace: nexus
spec:
storageClassName: ""
volumeName: nexus-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
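
Because both TCP routes use passthrough, Traefik never terminates TLS here: with SNI set to one of the hostnames above, the certificate presented should be the self-signed Sonatype one, not a Let's Encrypt certificate. A sketch of that check (substitute the Traefik LoadBalancer address for <traefik-ip>):

openssl s_client -connect <traefik-ip>:443 -servername nexus-riscv.allarddcs.nl </dev/null 2>/dev/null | openssl x509 -noout -subject -issuer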


@@ -0,0 +1,126 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nexus
namespace: nexus
labels:
app: nexus
spec:
replicas: 1
selector:
matchLabels:
app: nexus
template:
metadata:
labels:
app: nexus
spec:
containers:
- name: nexus
image: allardkrings/riscv64-nexus
resources:
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "4Gi"
cpu: "2000m"
ports:
- containerPort: 8081
name: web
- containerPort: 8443
name: websecure
- containerPort: 8444
name: docker
volumeMounts:
# Nexus work directory
- mountPath: /opt/sonatype/sonatype-work/nexus3
name: nexus-data
subPath: data-dir
env:
- name: INSTALL4J_ADD_VM_PARAMS
value: "-XX:ActiveProcessorCount=4 -Djava.util.prefs.userRoot=/opt/sonatype/sonatype-work/nexus3/javaprefs"
volumes:
- name: nexus-data
persistentVolumeClaim:
claimName: nexus-pvc
---
apiVersion: v1
kind: Service
metadata:
name: nexus
namespace: nexus
spec:
ports:
- name: web
targetPort: 8081
port: 8081
selector:
app: nexus
type: ClusterIP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: nexus-http
namespace: nexus
spec:
entryPoints:
- web
routes:
- match: Host(`nexus-riscv.allarddcs.nl`)
kind: Rule
services:
- name: nexus
port: 8081
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: nexus-tls
namespace: nexus
spec:
entryPoints:
- websecure
routes:
- match: HostSNI(`nexus-riscv.allarddcs.nl`)
services:
- name: nexus
      port: 8081  # NOTE: passthrough sends raw TLS to this port; it only works if Nexus itself serves TLS here (the keystore variant above routes to 8443 instead)
tls:
passthrough: true
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nexus-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/nexus/riscv
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nexus-pvc
namespace: nexus
spec:
storageClassName: ""
volumeName: nexus-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi

riscv/nexus/nexus.yaml Executable file

@@ -0,0 +1,123 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nexus
namespace: nexus
labels:
app: nexus
spec:
replicas: 1
selector:
matchLabels:
app: nexus
template:
metadata:
labels:
app: nexus
spec:
containers:
- name: nexus
image: allardkrings/riscv64-nexus
resources:
requests:
memory: "1Gi"
cpu: "500m"
limits:
memory: "2Gi"
cpu: "1000m"
ports:
- containerPort: 8081
name: web
volumeMounts:
# Nexus work directory
- mountPath: /opt/sonatype/sonatype-work/nexus3
name: nexus-data
subPath: data-dir
env:
- name: INSTALL4J_ADD_VM_PARAMS
value: "-XX:ActiveProcessorCount=4 -Djava.util.prefs.userRoot=/opt/sonatype/sonatype-work/nexus3/javaprefs"
volumes:
- name: nexus-data
persistentVolumeClaim:
claimName: nexus-pvc
---
apiVersion: v1
kind: Service
metadata:
name: nexus
namespace: nexus
spec:
ports:
- name: web
targetPort: 8081
port: 8081
selector:
app: nexus
type: ClusterIP
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: nexus-http
namespace: nexus
spec:
entryPoints:
- web
routes:
- match: Host(`nexus-riscv.allarddcs.nl`)
kind: Rule
services:
- name: nexus
port: 8081
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: nexus-tls
namespace: nexus
spec:
entryPoints:
- websecure
routes:
- match: Host(`nexus-riscv.allarddcs.nl`)
kind: Rule
services:
- name: nexus
port: 8081
tls:
secretName: nexus-cert
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nexus-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/nexus/riscv
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nexus-pvc
namespace: nexus
spec:
storageClassName: ""
volumeName: nexus-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: riscv-nginx
title: Nginx (riscv)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,14 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: nginx-http
namespace: nginx
spec:
entryPoints:
- web
routes:
- match: Host(`nginx-riscv.allarddcs.nl`)
kind: Rule
services:
- name: nginx
port: 80


@@ -0,0 +1,16 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: nginx-tls
namespace: nginx
spec:
entryPoints:
- websecure
routes:
- match: Host(`nginx-riscv.allarddcs.nl`)
kind: Rule
services:
- name: nginx
port: 80
tls:
certResolver: letsencrypt

riscv/nginx/nginx.yaml Executable file

@@ -0,0 +1,80 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: nginx
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: allardkrings/riscv64-nginx:1.24
volumeMounts:
- mountPath: /usr/share/nginx/html
name: nginx
subPath: html
ports:
- containerPort: 80
volumes:
- name: nginx
persistentVolumeClaim:
claimName: nginx-pvc
---
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: nginx
labels:
name: nginx
spec:
type: NodePort
ports:
- port: 80
nodePort: 30080
name: http
selector:
app: nginx
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nginx-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/nginx-riscv
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nginx-pvc
namespace: nginx
spec:
storageClassName: ""
volumeName: nginx-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
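
With the NodePort above plus the nginx-http and nginx-tls IngressRoutes, the same content should be reachable three ways; a quick smoke test (substitute a node address for <node-ip>):

curl -s http://<node-ip>:30080/ | head
curl -s http://nginx-riscv.allarddcs.nl/ | head
curl -sk https://nginx-riscv.allarddcs.nl/ | head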


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: riscv-olproperties
title: Olproperties (riscv)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,53 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: olproperties
labels:
app: olproperties
spec:
replicas: 1
selector:
matchLabels:
app: olproperties
template:
metadata:
labels:
app: olproperties
spec:
containers:
- name: olproperties
image: allardkrings/riscv64-olproperties:1.0
imagePullPolicy: Always
ports:
- containerPort: 9080
imagePullSecrets:
- name: registry-credentials
---
apiVersion: v1
kind: Service
metadata:
name: olproperties
spec:
type: ClusterIP
ports:
- name: port-0
port: 9080
selector:
app: olproperties
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: olproperties-tls
spec:
entryPoints:
- websecure
routes:
- match: Host(`olproperties-riscv.allarddcs.nl`)
kind: Rule
services:
- name: olproperties
port: 9080
tls:
certResolver: letsencrypt
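
A minimal smoke test once this is applied. The Deployment and Service above set no namespace, so they land in the kubeconfig's default namespace (assumed here); the port-forward runs in the background:

kubectl get pods -l app=olproperties
kubectl port-forward svc/olproperties 9080:9080 &
curl -s http://localhost:9080/ | head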


@@ -0,0 +1,44 @@
# syntax=docker/dockerfile:1
FROM ubuntu:22.04
# Avoid interactive prompts
ENV DEBIAN_FRONTEND=noninteractive
# Install JDK, bash, and utilities
RUN apt-get update && \
apt-get install -y --no-install-recommends \
openjdk-17-jdk-headless \
bash curl unzip ca-certificates && \
rm -rf /var/lib/apt/lists/*
# Set JAVA_HOME and PATH
ENV JAVA_HOME=/usr/lib/jvm/java-17-openjdk-riscv64
ENV PATH="${JAVA_HOME}/bin:${PATH}"
# Open Liberty version and paths
ENV OPENLIBERTY_HOME=/opt/openliberty
ENV OPENLIBERTY_VERSION=24.0.0.11
ENV OPENLIBERTY_FLAVOR=wlp-webProfile8
ENV OPENLIBERTY_URL="https://public.dhe.ibm.com/ibmdl/export/pub/software/openliberty/runtime/release/${OPENLIBERTY_VERSION}/${OPENLIBERTY_FLAVOR}-${OPENLIBERTY_VERSION}.zip"
# Download and install Open Liberty ("mv" renames /opt/wlp to ${OPENLIBERTY_HOME};
# creating the target directory first would nest the install one level too deep and break the later bin/server steps)
RUN echo "Downloading Open Liberty from ${OPENLIBERTY_URL}" && \
    curl -fSL --retry 5 --retry-delay 5 ${OPENLIBERTY_URL} -o /tmp/openliberty.zip && \
    unzip -q /tmp/openliberty.zip -d /opt && \
    mv /opt/wlp ${OPENLIBERTY_HOME} && \
    rm /tmp/openliberty.zip
# Make server script executable
RUN chmod +x ${OPENLIBERTY_HOME}/bin/server
# Create default server
RUN ${OPENLIBERTY_HOME}/bin/server create defaultServer
# Expose HTTP and HTTPS ports
EXPOSE 9080 9443
WORKDIR ${OPENLIBERTY_HOME}
# Run default server
CMD ["bin/server", "run", "defaultServer"]


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: riscv-openliberty
title: Openliberty (riscv)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: riscv-pgadmin
title: Pgadmin (riscv)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,206 @@
# https://github.com/akmalovaa/zabbix-docker/blob/main/docker-compose.yml
services:
zabbix-server:
image: ${ZABBIX_SERVER_IMAGE:-zabbix/zabbix-server-pgsql:ubuntu-7.0-latest}
container_name: server
restart: unless-stopped
ports:
- "10051:10051"
environment:
DB_SERVER_HOST: postgres
DB_SERVER_PORT: 5432
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB}
depends_on:
- postgres
networks:
- network-zabbix
zabbix-frontend:
image: ${ZABBIX_FRONTEND_IMAGE:-zabbix/zabbix-web-nginx-pgsql:ubuntu-7.0-latest}
restart: unless-stopped
container_name: frontend
ports:
- "8080:8080"
- "8443:8443"
environment:
DB_SERVER_HOST: postgres
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB}
PHP_TZ: ${TZ}
ZBX_SERVER_HOST: zabbix-server
      ZBX_SERVER_PORT: 10051  # the Zabbix server trapper port (5432 is the Postgres port)
depends_on:
- zabbix-server
networks:
- network-zabbix
# zabbix-agent:
# image: ${ZABBIX_AGENT_IMAGE:-zabbix/zabbix-agent:ubuntu-7.0-latest}
# container_name: agent
# restart: unless-stopped
# ports:
# - "10050:10050"
# environment:
# ZBX_ACTIVE_ALLOW: false
# TZ: ${TZ}
# ZBX_SERVER_HOST: zabbix-server
# ZBX_SERVER_PORT: 10051
# ZBX_HOSTNAME: zabbix-agent
# ZBX_HOSTNAMEITEM: system.hostname
# depends_on:
# - zabbix-server
# networks:
# - network-zabbix
postgres:
image: ${POSTGRES_IMAGE:-postgres:latest}
container_name: postgres
restart: unless-stopped
ports:
- "7432:5432"
- "7433:5433"
volumes:
- postgres:/var/lib/postgresql/data
environment:
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB}
      PGDATA: /var/lib/postgresql/data/pgdata  # the official postgres image reads PGDATA; PG_DATA was silently ignored
networks:
- network-zabbix
# grafana:
# image: ${GRAFANA_IMAGE:-grafana/grafana}
# container_name: grafana
# restart: unless-stopped
# ports:
# - "3333:3333"
# environment:
# GF_SECURITY_ADMIN_USER: ${GRAFANA_USER:-admin}
# GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_SECRET:-12345}
# GF_INSTALL_PLUGINS: alexanderzobnin-zabbix-app
# TZ: ${TZ}
# user: "472"
# volumes:
# - grafana:/var/lib/grafana
# - ./grafana/grafana.ini:/etc/grafana/grafana.ini
# - ./grafana/provisioning:/etc/grafana/provisioning
# depends_on:
# - zabbix-frontend
# networks:
# - network-zabbix
volumes:
postgres: {}
# grafana: {}
networks:
network-zabbix:
driver: bridge
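
The compose file takes all credentials from the environment; a minimal .env sketch with illustrative values (docker compose reads it from the same directory):

POSTGRES_USER=zabbix
POSTGRES_PASSWORD=change-me
POSTGRES_DB=zabbix
TZ=Europe/Amsterdam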

riscv/pgadmin/pgadmin.yaml Executable file

@@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: pgadmin
namespace: postgres
labels:
app: pgadmin
spec:
replicas: 1
selector:
matchLabels:
app: pgadmin
template:
metadata:
labels:
app: pgadmin
spec:
containers:
- name: pgadmin
image: allardkrings/riscv64-pgadmin4:8.8
ports:
- containerPort: 80
env:
- name: PGADMIN_DEFAULT_EMAIL
value: admin@alldcs.nl
- name: PGADMIN_DEFAULT_PASSWORD
value: Pgadmin01@
volumeMounts:
- mountPath: /var/lib/pgadmin
name: pgadmin
volumes:
- name: pgadmin
persistentVolumeClaim:
claimName: pgadmin-pvc
---
apiVersion: v1
kind: Service
metadata:
name: pgadmin
namespace: postgres
labels:
name: pgadmin
spec:
  selector:
    app: pgadmin  # single selector matching the pod template label (the duplicate selector key was invalid YAML)
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: pgadmin-tls
namespace: postgres
spec:
entryPoints:
- websecure
routes:
- match: Host(`pgadmin-riscv.allarddcs.nl`)
kind: Rule
services:
- name: pgadmin
port: 80
tls:
certResolver: letsencrypt
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pgadmin-pv
spec:
storageClassName: ""
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/pgadmin/riscv
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pgadmin-pvc
namespace: postgres
spec:
storageClassName: ""
volumeName: pgadmin-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1Gi
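
A login-page check without exposing the service (service name and namespace from the manifest above; the port-forward runs in the background):

kubectl -n postgres port-forward svc/pgadmin 8080:80 &
curl -sI http://localhost:8080/login | head -n1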


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: riscv-phpmyadmin
title: Phpmyadmin (riscv)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: phpmyadmin
namespace: mariadb
labels:
app: phpmyadmin
spec:
replicas: 1
selector:
matchLabels:
app: phpmyadmin
template:
metadata:
labels:
app: phpmyadmin
spec:
containers:
- name: phpmyadmin
image: allardkrings/riscv64-phpmyadmin:5.2.1.1
ports:
- containerPort: 80
env:
- name: PMA_HOST
value: mariadb.mariadb
- name: PMA_PORT
value: "3306"
- name: MYSQL_ROOT_PASSWORD
value: "password"
---
apiVersion: v1
kind: Service
metadata:
name: phpmyadmin
namespace: mariadb
spec:
selector:
app: phpmyadmin
ports:
- protocol: TCP
port: 80
targetPort: 80
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: phpmyadmin-tls
namespace: mariadb
spec:
entryPoints:
- websecure
routes:
- match: Host(`phpmyadmin-riscv.allarddcs.nl`)
kind: Rule
services:
- name: phpmyadmin
port: 80
tls:
certResolver: letsencrypt
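
PMA_HOST points at the mariadb service in the mariadb namespace; a quick name-resolution check from inside that namespace (sketch; assumes a busybox image that runs on the nodes):

kubectl -n mariadb run dns-check --rm -it --restart=Never --image=busybox -- nslookup mariadb.mariadb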


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: riscv-postgres14
title: Postgres14 (riscv)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,85 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgres14-pv
spec:
storageClassName: ""
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- hard
- nfsvers=4.1
nfs:
server: 192.168.2.110
path: /mnt/nfs_share/postgres14riscv
readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres14-pvc
namespace: postgres
spec:
storageClassName: ""
volumeName: postgres14-pv
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres14
namespace: postgres
spec:
serviceName: postgres14
replicas: 1
selector:
matchLabels:
app: postgres14
template:
metadata:
labels:
app: postgres14
spec:
containers:
- name: postgres14
image: allardkrings/riscv64-postgres:14
ports:
- containerPort: 5432
env:
- name: POSTGRES_DB
value: postgres
- name: POSTGRES_USER
value: admin
- name: POSTGRES_PASSWORD
value: Pgadmin01@
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres
volumes:
- name: postgres
persistentVolumeClaim:
claimName: postgres14-pvc
---
apiVersion: v1
kind: Service
metadata:
name: postgres14
namespace: postgres
labels:
name: postgres14
spec:
type: ClusterIP
ports:
- port: 5432
name: postgres
selector:
app: postgres14
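
A connectivity check from inside the cluster, reusing the riscv64 image and the credentials from the StatefulSet above:

kubectl run psql-check --rm -it --restart=Never -n postgres \
  --image=allardkrings/riscv64-postgres:14 --env=PGPASSWORD=Pgadmin01@ -- \
  psql -h postgres14.postgres -U admin -d postgres -c 'select version();'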

riscv/traefik/README.md Normal file

@@ -0,0 +1,83 @@
#Installation:
Just install K3S; it ships with Traefik 2 by default.
This Traefik is installed via the Helm controller built into K3S.
Test: kubectl get svc -n kube-system: you should see only port 80 and port 443.
#Traefik version:
kubectl exec -it traefik-765df5f764-br4rs -n kube-system -- traefik version
prints:
Version:      2.10.3
Codename:     saintmarcelin
Go version:   go1.20.6
Built:        2023-07-19T09:18:04Z
OS/Arch:      linux/riscv64
#Enabling the dashboard
kubectl apply -f traefik-custom-conf.yaml
(This is a Helm configuration that modifies the Traefik installed via Helm.)
Stop and start K3S. It takes a while before the traefik service is back up on both nodes.
Test: kubectl get svc -n kube-system: port 9000 for the dashboard now shows up as well.
The Traefik dashboard can now be reached via NodePort.
The ingressroutes do not work yet, however, and do not show up on the dashboard either.
The traefik pod's logging also shows errors about objects that cannot be found.
#Increasing time-outs (for example when they occur while pushing images to Nexus)
KUBE_EDITOR=nano kubectl edit deploy traefik -n kube-system
then add the following args:
- --entryPoints.web.transport.respondingTimeouts.readTimeout=600s
- --entryPoints.websecure.transport.respondingTimeouts.readTimeout=600s
and then wait for the traefik rollout to finish (editing the Deployment already restarts the pods):
kubectl rollout status deploy traefik -n kube-system
#Next steps:
migrate from traefik.containo.us to traefik.io:
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/content/reference/dynamic-configuration/kubernetes-crd-rbac.yml
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/content/reference/dynamic-configuration/kubernetes-crd-definition-v1.yml
adjust the authorizations:
kubectl apply -f rbac.yaml
kubectl apply -f clusterrolbinding-admin.yaml
#Background:
In v2.10, the Kubernetes CRDs API Group 'traefik.containo.us' is deprecated,
and its support will end starting with Traefik v3.
Please use the API Group traefik.io instead.
As the Kubernetes CRD provider still works with both API versions
(traefik.io/v1alpha1 and traefik.containo.us/v1alpha1),
this means that for the same kind, namespace and name,
the provider will only keep the traefik.io/v1alpha1 resource.
In addition, the Kubernetes CRDs API Version traefik.containo.us/v1alpha1
will not be supported in Traefik v3 itself.
Note that the CRDs and the RBAC in the cluster must be updated before upgrading Traefik. To do so, apply the required CRD and RBAC manifests for v2.10:
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/content/reference/dynamic-configuration/kubernetes-crd-rbac.yml
kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/content/reference/dynamic-configuration/kubernetes-crd-definition-v1.yml
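
With the dashboard enabled it can also be reached without a NodePort, via a port-forward (the port-forward runs in the background):

kubectl -n kube-system port-forward deploy/traefik 9000:9000 &
curl -s http://localhost:9000/api/overview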

riscv/traefik/allard/traefik.yaml Executable file

@@ -0,0 +1,261 @@
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: traefik
namespace: traefik
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
helm.sh/chart: traefik-21.2.0
app.kubernetes.io/managed-by: Helm
annotations:
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-traefik
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
helm.sh/chart: traefik-21.2.0
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingressclasses
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- traefik.containo.us
resources:
- ingressroutes
- ingressroutetcps
- ingressrouteudps
- middlewares
- middlewaretcps
- tlsoptions
- tlsstores
- traefikservices
- serverstransports
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-traefik
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
helm.sh/chart: traefik-21.2.0
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-traefik
subjects:
- kind: ServiceAccount
name: traefik
namespace: traefik
---
apiVersion: v1
kind: Service
metadata:
name: traefik
namespace: traefik
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
helm.sh/chart: traefik-21.2.0
app.kubernetes.io/managed-by: Helm
annotations:
spec:
type: LoadBalancer
selector:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
ports:
- port: 80
name: "web"
targetPort: web
protocol: TCP
- port: 443
name: "websecure"
targetPort: websecure
protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: traefik
namespace: traefik
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
helm.sh/chart: traefik-21.2.0
app.kubernetes.io/managed-by: Helm
annotations:
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
minReadySeconds: 0
template:
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "9100"
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
helm.sh/chart: traefik-21.2.0
app.kubernetes.io/managed-by: Helm
spec:
serviceAccountName: traefik
terminationGracePeriodSeconds: 60
hostNetwork: false
containers:
- image: allardkrings/riscv64-traefik:1.0
imagePullPolicy: IfNotPresent
name: traefik
resources:
readinessProbe:
httpGet:
path: /ping
port: 9000
scheme: HTTP
failureThreshold: 1
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
httpGet:
path: /ping
port: 9000
scheme: HTTP
failureThreshold: 3
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
lifecycle:
ports:
- name: "metrics"
containerPort: 9100
protocol: "TCP"
- name: "traefik"
containerPort: 9000
protocol: "TCP"
- name: "web"
containerPort: 8000
protocol: "TCP"
- name: "websecure"
containerPort: 8443
protocol: "TCP"
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
volumeMounts:
- name: data
mountPath: /data
- name: tmp
mountPath: /tmp
args:
- "--global.checknewversion"
- "--global.sendanonymoususage"
- "--entrypoints.metrics.address=:9100/tcp"
- "--entrypoints.traefik.address=:9000/tcp"
- "--entrypoints.web.address=:8000/tcp"
- "--entrypoints.websecure.address=:8443/tcp"
- "--api.dashboard=true"
- "--api.insecure=true"
- "--ping=true"
- "--metrics.prometheus=true"
- "--metrics.prometheus.entrypoint=metrics"
- "--providers.kubernetescrd"
- "--providers.kubernetesingress"
- "--entrypoints.websecure.http.tls=true"
- "--certificatesresolvers.letsencrypt.acme.tlschallenge=true"
- "--certificatesresolvers.letsencrypt.acme.email=admin@alldcs.nl"
- "--certificatesresolvers.letsencrypt.acme.storage=/data/letsencrypt.json"
volumes:
- name: data
emptyDir: {}
- name: tmp
emptyDir: {}
securityContext:
fsGroup: 65532
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
helm.sh/chart: traefik-21.2.0
app.kubernetes.io/managed-by: Helm
name: traefik
spec:
controller: traefik.io/ingress-controller
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
namespace: traefik
annotations:
labels:
app.kubernetes.io/name: traefik
app.kubernetes.io/instance: traefik-traefik
helm.sh/chart: traefik-21.2.0
app.kubernetes.io/managed-by: Helm
spec:
entryPoints:
- traefik
routes:
- match: PathPrefix(`/dashboard`) || PathPrefix(`/api`)
kind: Rule
services:
- name: api@internal
kind: TraefikService


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: riscv-traefik
title: Traefik (riscv)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml


@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: traefik
rules:
- apiGroups: ["traefik.io"]
resources: ["ingressroutes", "ingressroutesstatus"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["services", "endpoints", "pods", "secrets"]
verbs: ["get", "list", "watch"]


@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: traefik-kube-system
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: traefik
helm.sh/chart: traefik-21.2.1_up21.2.0
name: traefik-kube-system-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: admin
subjects:
- kind: ServiceAccount
name: traefik
namespace: kube-system


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: traefik-ingressroute
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik
subjects:
- kind: ServiceAccount
name: traefik
namespace: traefik

riscv/traefik/rbac.yaml Normal file

@@ -0,0 +1,65 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- traefik.io
- traefik.containo.us
resources:
- middlewares
- middlewaretcps
- ingressroutes
- traefikservices
- ingressroutetcps
- ingressrouteudps
- tlsoptions
- tlsstores
- serverstransports
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik
namespace: kube-system
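
Whether the binding grants what Traefik needs can be checked by impersonating the service account (names from the manifests above):

kubectl auth can-i list ingressroutes.traefik.io --as=system:serviceaccount:kube-system:traefik
kubectl auth can-i watch middlewares.traefik.io --as=system:serviceaccount:kube-system:traefik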

riscv/traefik/tlsoption.yaml Executable file

@@ -0,0 +1,8 @@
apiVersion: traefik.io/v1alpha1
kind: TLSOption
metadata:
  name: tlsoption
namespace: traefik
spec:
minVersion: VersionTLS12
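
A TLSOption that is not named default only takes effect when a route opts in; a sketch of the tls block an IngressRoute would carry to use the option above:

tls:
  options:
    name: tlsoption
    namespace: traefik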


@@ -0,0 +1,22 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
additionalArguments:
- "--api"
- "--api.dashboard=true"
- "--api.insecure=true"
- "--log.level=DEBUG"
- "--entrypoints.websecure.http.tls=true"
- "--certificatesresolvers.letsencrypt.acme.tlschallenge=true"
- "--certificatesresolvers.letsencrypt.acme.email=admin@allarddcs.nl"
- "--certificatesresolvers.letsencrypt.acme.storage=/data/letsencrypt.json"
ports:
traefik:
expose: true
providers:
kubernetesCRD:
allowCrossNamespace: true
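
After this HelmChartConfig is applied, K3S regenerates the Traefik deployment; whether the extra arguments landed can be verified with (sketch):

kubectl -n kube-system get deploy traefik -o jsonpath='{.spec.template.spec.containers[0].args}' | tr ',' '\n' | grep -i -e api -e letsencrypt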


@@ -0,0 +1,14 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard
namespace: kube-system
spec:
entryPoints:
- websecure
routes:
- match: Host(`traefik-riscv.allarddcs.nl`) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`))
kind: Rule
services:
- name: api@internal
kind: TraefikService

riscv/zabbix/README.md Normal file

@@ -0,0 +1 @@
This is a catalog of all the YAMLs and values used in Allard's clusters.


@@ -0,0 +1,11 @@
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: riscv-zabbix
title: Zabbix (riscv)
spec:
type: service
lifecycle: production
owner: platform-team
partOf:
- ../catalog-info.yaml

Some files were not shown because too many files have changed in this diff.