more apps

This commit is contained in:
2025-08-24 13:38:37 +02:00
parent 9cb6f8c0e2
commit 2aaae8ee0f
136 changed files with 21915 additions and 0 deletions

BIN
.DS_Store vendored

Binary file not shown.

View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "bookstack" path of the k8s-apps repo
# into the bookstack namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: bookstack
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: bookstack
    server: https://kubernetes.default.svc
  project: default
  source:
    path: bookstack
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "fileserver" path of the k8s-apps repo
# into the fileserver namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: fileserver
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: fileserver
    server: https://kubernetes.default.svc
  project: default
  source:
    path: fileserver
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

16
app-of-apps/forum.yaml Normal file
View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "forum" path of the k8s-apps repo
# into the forum namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: forum
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: forum
    server: https://kubernetes.default.svc
  project: default
  source:
    path: forum
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "guacamole" path of the k8s-apps repo
# into the guacamole namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: guacamole
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: guacamole
    server: https://kubernetes.default.svc
  project: default
  source:
    path: guacamole
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

16
app-of-apps/jellyfin.yaml Normal file
View File

@@ -0,0 +1,16 @@
# Argo CD Application for Jellyfin (file: app-of-apps/jellyfin.yaml).
# Fixed: name/namespace/path were misspelled "fellyfin" — the filename and the
# sibling manifests indicate "jellyfin" was intended.
# NOTE(review): confirm the repo directory is named "jellyfin" (not "fellyfin")
# before merging, since spec.source.path must match it.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jellyfin
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: jellyfin
    server: https://kubernetes.default.svc
  project: default
  source:
    path: jellyfin
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

16
app-of-apps/jitsi.yaml Normal file
View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "jitsi" path of the k8s-apps repo
# into the jitsi namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jitsi
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: jitsi
    server: https://kubernetes.default.svc
  project: default
  source:
    path: jitsi
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

16
app-of-apps/kubevirt.yaml Normal file
View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "kubevirt" path of the k8s-apps repo
# into the kubevirt namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kubevirt
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: kubevirt
    server: https://kubernetes.default.svc
  project: default
  source:
    path: kubevirt
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

16
app-of-apps/logging.yaml Normal file
View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "logging" path of the k8s-apps repo
# into the logging namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: logging
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: logging
    server: https://kubernetes.default.svc
  project: default
  source:
    path: logging
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

16
app-of-apps/mail.yaml Normal file
View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "mail" path of the k8s-apps repo
# into the mail namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: mail
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: mail
    server: https://kubernetes.default.svc
  project: default
  source:
    path: mail
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

16
app-of-apps/matrix.yaml Normal file
View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "matrix" path of the k8s-apps repo
# into the matrix namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: matrix
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: matrix
    server: https://kubernetes.default.svc
  project: default
  source:
    path: matrix
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "nextcloud" path of the k8s-apps repo
# into the nextcloud namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: nextcloud
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: nextcloud
    server: https://kubernetes.default.svc
  project: default
  source:
    path: nextcloud
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "paperless" path of the k8s-apps repo
# into the paperless namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: paperless
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: paperless
    server: https://kubernetes.default.svc
  project: default
  source:
    path: paperless
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "vaultwarden" path of the k8s-apps repo
# into the vaultwarden namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: vaultwarden
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: vaultwarden
    server: https://kubernetes.default.svc
  project: default
  source:
    path: vaultwarden
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

View File

@@ -0,0 +1,16 @@
# Argo CD Application: deploys the "wordpress" path of the k8s-apps repo
# into the wordpress namespace. Indentation restored to valid YAML.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: wordpress
  namespace: argocd
  # Cascade-delete managed resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: wordpress
    server: https://kubernetes.default.svc
  project: default
  source:
    path: wordpress
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

10
bookstack/README.md Normal file
View File

@@ -0,0 +1,10 @@
# BookStack
## Wiki
BookStack is a simple, self-hosted, easy-to-use platform for organising and storing information.

### Improvements
- SMTP settings
- metrics
- liveness probes
- resource limits

47
bookstack/adminer.yaml Normal file
View File

@@ -0,0 +1,47 @@
# Adminer: web UI for administering the BookStack MariaDB ("db" Service).
# Indentation restored to valid YAML.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: adminer
  namespace: bookstack
  labels:
    app: adminer
spec:
  replicas: 1
  selector:
    matchLabels:
      app: adminer
  template:
    metadata:
      labels:
        app: adminer
    spec:
      containers:
        - name: adminer
          # NOTE(review): untagged image resolves to :latest — consider pinning
          # a specific version for reproducible deploys.
          image: adminer
          imagePullPolicy: IfNotPresent
          env:
            # Pre-fill the Adminer login form with the local "db" Service host.
            - name: ADMINER_DEFAULT_SERVER
              value: db
          ports:
            - containerPort: 8080
              protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: adminer
  namespace: bookstack
spec:
  internalTrafficPolicy: Cluster
  # IPv6-only service, matching the cluster's single-stack configuration here.
  ipFamilies:
    - IPv6
  ipFamilyPolicy: SingleStack
  ports:
    - name: http
      port: 8080
      protocol: TCP
      targetPort: 8080
  selector:
    app: adminer
  sessionAffinity: None
  type: ClusterIP

View File

@@ -0,0 +1,140 @@
# Velero backup Schedules for the bookstack namespace.
# CSI-snapshot schedules target the ceph-bucket location; restic (file-system
# backup) schedules target aux-balancer-minio. Indentation restored to valid
# YAML; the copy-pasted "# every hour" comments corrected to match each cron
# expression; cron strings quoted so no parser treats " #" as a comment edge case.
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: bookstack-backup-csi-hourly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 15-22 * * *"  # hourly, on the hour, from 15:00 to 22:00
  # If true, backups carry an OwnerReference to this Schedule and are
  # deleted together with it. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - bookstack
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 8h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: bookstack-backup-csi-daily
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 * * *"  # daily at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - bookstack
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: bookstack-backup-csi-weekly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 * * 1"  # weekly, Monday at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - bookstack
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: bookstack-backup-restic-daily
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 * * *"  # daily at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    # File-system backup instead of CSI snapshots for this location.
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - bookstack
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: bookstack-backup-restic-weekly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 * * 1"  # weekly, Monday at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - bookstack
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: bookstack-backup-restic-monthly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 1 * *"  # monthly, 1st day at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - bookstack
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 4380h0m0s

307
bookstack/bookstack.yaml Normal file
View File

@@ -0,0 +1,307 @@
# BookStack application manifests: .env ConfigMap, init-script ConfigMap,
# CephFS StorageClass, PVC, Deployment, and Service.
# Indentation restored to valid YAML; the .env and startup.sh block-scalar
# contents are reproduced byte-for-byte (they are runtime data).
# SECURITY NOTE(review): APP_KEY, DB_PASSWORD and LDAP_PASS are committed in
# plain text here — consider moving them to a Secret / external secret store.
apiVersion: v1
kind: ConfigMap
metadata:
  name: env
  namespace: bookstack
data:
  # file-like keys
  .env: |
    #from configmap
    # This file, when named as ".env" in the root of your BookStack install
    # folder, is used for the core configuration of the application.
    # By default this file contains the most common required options but
    # a full list of options can be found in the '.env.example.complete' file.
    # NOTE: If any of your values contain a space or a hash you will need to
    # wrap the entire value in quotes. (eg. MAIL_FROM_NAME="BookStack Mailer")
    # Use dark mode by default
    # Will be overridden by any existing user/session preference.
    APP_DEFAULT_DARK_MODE=true
    # Application key
    # Used for encryption where needed.
    # Run `php artisan key:generate` to generate a valid key.
    APP_KEY=base64:Gvel4j1kfhBBoT7aho5ibdozSkf7BwB/4vDfSbMTkiU=
    # Application URL
    # This must be the root URL that you want to host BookStack on.
    # All URLs in BookStack will be generated using this value
    # to ensure URLs generated are consistent and secure.
    # If you change this in the future you may need to run a command
    # to update stored URLs in the database. Command example:
    # php artisan bookstack:update-url https://old.example.com https://new.example.com
    APP_URL=https://bookstack.undercloud.cf
    # Database details
    DB_HOST='db'
    DB_PORT='3306'
    DB_DATABASE='bookstack'
    DB_USERNAME='bookstack'
    DB_PASSWORD='verysecurePWDBbookstackbookstack'
    # Mail system to use
    # Can be 'smtp' or 'sendmail'
    MAIL_DRIVER=smtp
    # Mail sender details
    MAIL_FROM_NAME="BookStack"
    MAIL_FROM=bookstack@example.com
    # SMTP mail options
    # These settings can be checked using the "Send a Test Email"
    # feature found in the "Settings > Maintenance" area of the system.
    MAIL_HOST=localhost
    MAIL_PORT=1025
    MAIL_USERNAME=null
    MAIL_PASSWORD=null
    MAIL_ENCRYPTION=null
    # General auth
    #AUTH_METHOD=ldap
    AUTH_METHOD=standard
    # The LDAP host, Adding a port is optional
    #LDAP_SERVER=example.com:389
    # If using LDAP over SSL you should also define the protocol:
    LDAP_SERVER=ldaps://ldap.undercloud.cf:636
    # The base DN from where users will be searched within
    LDAP_BASE_DN="ou=users,dc=undercloud,dc=cf"
    # The full DN and password of the user used to search the server
    # Can both be left as 'false' (without quotes) to bind anonymously
    LDAP_DN="cn=bookstack,ou=serviceaccounts,ou=users,dc=undercloud,dc=cf"
    LDAP_PASS="thisismysecureLDAPPWbookstack"
    # A filter to use when searching for users
    # The user-provided user-name used to replace any occurrences of '${user}'
    # If you're setting this option via other means, such as within a docker-compose.yml,
    # you may need escape the $, often using $$ or \$ instead.
    # Note: This option cannot be used with the docker-compose.yml `env_file` option.
    LDAP_USER_FILTER=(&(uid=${user}))
    # Set the LDAP version to use when connecting to the server
    # Should be set to 3 in most cases.
    LDAP_VERSION=3
    # Set the property to use as a unique identifier for this user.
    # Stored and used to match LDAP users with existing BookStack users.
    # Prefixing the value with 'BIN;' will assume the LDAP service provides the attribute value as
    # binary data and BookStack will convert the value to a hexidecimal representation.
    # Defaults to 'uid'.
    LDAP_ID_ATTRIBUTE=uid
    # Set the default 'email' attribute. Defaults to 'mail'
    LDAP_EMAIL_ATTRIBUTE=mail
    # Set the property to use for a user's display name. Defaults to 'cn'
    LDAP_DISPLAY_NAME_ATTRIBUTE=cn
    # Set the attribute to use for the user's avatar image.
    # Must provide JPEG binary image data.
    # Will be used upon login or registration when the user doesn't
    # already have an avatar image set.
    # Remove this option or set to 'null' to disable LDAP avatar import.
    LDAP_THUMBNAIL_ATTRIBUTE=jpegphoto
    # Force TLS to be used for LDAP communication.
    # Use this if you can but your LDAP support will need to support it and
    # you may need to import your certificate to the BookStack host machine.
    # Defaults to 'false'.
    LDAP_START_TLS=false
    # If you need to allow untrusted LDAPS certificates, add the below and uncomment (remove the #)
    # Only set this option if debugging or you're absolutely sure it's required for your setup.
    # If using php-fpm, you may want to restart it after changing this option to avoid instability.
    LDAP_TLS_INSECURE=true
    # If you need to debug the details coming from your LDAP server, add the below and uncomment (remove the #)
    # Only set this option if debugging since it will block logins and potentially show private details.
    #LDAP_DUMP_USER_DETAILS=true
---
# Init script that copies the mounted .env into the BookStack config volume
# on every pod start (the "ran already" guard is commented out).
apiVersion: v1
kind: ConfigMap
metadata:
  name: startup
  namespace: bookstack
data:
  startup.sh: |
    #!/bin/sh
    echo "startup..."
    #if test ! -f "/config/startup.ran"; then
    #  touch /config/startup.ran
    cp -f /mnt/.env /config/www/.env
    #else
    #  echo "startup ran already!"
    #fi
    echo "startup done."
    #exit 123
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: bookstack
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: bookstack
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: bookstack-replicated
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: bookstack
  namespace: bookstack
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 32Gi
  storageClassName: bookstack
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bookstack
  namespace: bookstack
  labels:
    app: bookstack
spec:
  replicas: 1
  selector:
    matchLabels:
      app: bookstack
  template:
    metadata:
      labels:
        app: bookstack
    spec:
      dnsConfig:
        options:
          # Resolve external names without walking the cluster search domains.
          - name: ndots
            value: "1"
      initContainers:
        # Copies the mounted .env into /config before the main container starts.
        - name: copyappini
          image: linuxserver/bookstack:23.05.2
          command: ['/bin/startup.sh']
          volumeMounts:
            - mountPath: "/config"
              name: bookstack
            - mountPath: "/mnt/.env"
              name: env
              subPath: .env
            - mountPath: /bin/startup.sh
              name: startup
              subPath: startup.sh
      containers:
        - name: bookstack
          image: linuxserver/bookstack:23.05.2
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
          #lifecycle:
          #  postStart:
          #    exec:
          #      command:
          #        - "/bin/startup.sh"
          #livenessProbe:
          #  httpGet:
          #    path: /status
          #    port: 80
          #    #httpHeaders:
          #    #- name: Custom-Header
          #    #  value: Awesome
          #  initialDelaySeconds: 120
          #  periodSeconds: 10
          env:
            - name: PUID
              value: "1000"
            - name: PGID
              value: "1000"
            - name: DB_HOST
              value: "db"
            - name: DB_PORT
              value: "3306"
            - name: APP_URL
              value: "https://bookstack.undercloud.cf"
            - name: DB_USER
              valueFrom:
                secretKeyRef:
                  name: bookstack-db
                  key: username
                  optional: false
            - name: DB_PASS
              valueFrom:
                secretKeyRef:
                  name: bookstack-db
                  key: user.pw
                  optional: false
            - name: DB_DATABASE
              value: "bookstack"
          volumeMounts:
            - mountPath: "/config"
              name: bookstack
      volumes:
        - name: bookstack
          persistentVolumeClaim:
            claimName: bookstack
        - name: env
          configMap:
            name: env
            defaultMode: 0777
            items:
              - key: ".env"
                path: ".env"
        - name: startup
          configMap:
            name: startup
            # Executable by owner only — the init container runs it directly.
            defaultMode: 0700
            items:
              - key: "startup.sh"
                path: "startup.sh"
---
apiVersion: v1
kind: Service
metadata:
  name: bookstack
  namespace: bookstack
  labels:
    app: bookstack
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: PreferDualStack
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 80
  selector:
    app: bookstack
  sessionAffinity: None
  type: ClusterIP

216
bookstack/db.yaml Normal file
View File

@@ -0,0 +1,216 @@
# MariaDB for BookStack: StorageClass + PVC + Deployment + Service, plus a
# separate StorageClass/PVC/Deployment for scheduled mariadb-backup dumps.
# Indentation restored to valid YAML.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: bookstack-db
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: bookstack
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: bookstack-replicated
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: db
  namespace: bookstack
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 16Gi
  storageClassName: bookstack-db
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: db
  namespace: bookstack
  labels:
    app: db
spec:
  replicas: 1
  selector:
    matchLabels:
      app: db
  template:
    metadata:
      labels:
        app: db
    spec:
      containers:
        - name: db
          image: mariadb:10.5
          imagePullPolicy: "IfNotPresent"
          ports:
            - name: mysql
              containerPort: 3306
          env:
            - name: MARIADB_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: bookstack-db
                  key: root.pw
            - name: MARIADB_USER
              valueFrom:
                secretKeyRef:
                  name: bookstack-db
                  key: username
                  optional: false
            - name: MARIADB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: bookstack-db
                  key: user.pw
                  optional: false
            - name: MARIADB_DATABASE
              value: bookstack
          #livenessProbe:
          #  exec:
          #    command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
          #  initialDelaySeconds: 120
          #  periodSeconds: 10
          #  timeoutSeconds: 1
          #  successThreshold: 1
          #  failureThreshold: 3
          #readinessProbe:
          #  exec:
          #    command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
          #  initialDelaySeconds: 30
          #  periodSeconds: 10
          #  timeoutSeconds: 1
          #  successThreshold: 1
          #  failureThreshold: 3
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: db
            readOnly: false
---
apiVersion: v1
kind: Service
metadata:
  name: db
  namespace: bookstack
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
    - IPv6
  ipFamilyPolicy: SingleStack
  ports:
    - name: mysql
      port: 3306
      protocol: TCP
      targetPort: 3306
  selector:
    app: db
  sessionAffinity: None
  type: ClusterIP
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: bookstack-db-backup
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: bookstack
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: bookstack-replicated
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: db-backup
  namespace: bookstack
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: bookstack-db-backup
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: db-backup
  namespace: bookstack
  labels:
    app: db-backup
spec:
  replicas: 1
  selector:
    matchLabels:
      app: db-backup
  template:
    metadata:
      labels:
        app: db-backup
    spec:
      containers:
        - name: db-backup
          # Dumps the database into /backup on the schedule below.
          image: rsprta/mariadb-backup
          imagePullPolicy: "IfNotPresent"
          env:
            - name: CRON_TIMER
              value: "@daily"
            - name: MARIADB_HOST
              value: db
            - name: MARIADB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: bookstack-db
                  key: root.pw
            - name: MARIADB_USER
              value: root
            - name: MARIADB_PORT
              value: "3306"
          volumeMounts:
            - mountPath: /backup
              name: backup
      volumes:
        - name: backup
          persistentVolumeClaim:
            claimName: db-backup
            readOnly: false

42
bookstack/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
# Rook CephFilesystem backing the bookstack StorageClasses.
# Indentation restored to valid YAML; commented-out placement/resources
# stanzas preserved as comments.
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: bookstack
  namespace: rook-ceph
spec:
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  dataPools:
    - name: replicated
      failureDomain: host
      replicated:
        size: 3
  # Data is removed with the filesystem on delete.
  preserveFilesystemOnDelete: false
  metadataServer:
    activeCount: 1
    activeStandby: true
    placement:
      # nodeAffinity:
      #   requiredDuringSchedulingIgnoredDuringExecution:
      #     nodeSelectorTerms:
      #       - matchExpressions:
      #           - key: role
      #             operator: In
      #             values:
      #               - mds-node
      # Allow the MDS pods onto tainted storage nodes.
      tolerations:
        - key: node-role.kubernetes.io/storage-node
          operator: Exists
          effect: NoSchedule
      # podAffinity:
      # podAntiAffinity:
      # topologySpreadConstraints:
    #resources:
    #  limits:
    #    cpu: "80m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"

25
bookstack/ingress.yaml Normal file
View File

@@ -0,0 +1,25 @@
# Ingress for BookStack with a Let's Encrypt certificate via cert-manager.
# Indentation restored to valid YAML. The deprecated
# "kubernetes.io/ingress.class" annotation is replaced by spec.ingressClassName
# (the supported mechanism since networking.k8s.io/v1).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: bookstack
  namespace: bookstack
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - bookstack.undercloud.cf
      secretName: bookstack-tls
  rules:
    - host: bookstack.undercloud.cf
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: bookstack
                port:
                  number: 80

6
bookstack/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
# Namespace for the BookStack stack; the "prometheus" label opts the
# namespace into monitoring (label selector defined by the Prometheus setup).
apiVersion: v1
kind: Namespace
metadata:
  name: bookstack
  labels:
    prometheus: prometheus

10
bookstack/secrets.yaml Normal file
View File

@@ -0,0 +1,10 @@
# Database credentials consumed by the bookstack and db Deployments.
# SECURITY NOTE(review): base64 is not encryption — these values decode to
# plain-text passwords committed to the repo. Consider sealed-secrets /
# external-secrets instead of committing them.
apiVersion: v1
kind: Secret
metadata:
  name: bookstack-db
  namespace: bookstack
type: Opaque
data:
  root.pw: dmVyeXNlY3VyZVBXREJib29rc3RhY2tyb290
  username: Ym9va3N0YWNr
  user.pw: dmVyeXNlY3VyZVBXREJib29rc3RhY2tib29rc3RhY2s=

16
fileserver/README.md Normal file
View File

@@ -0,0 +1,16 @@
# Fileserver
## samba + filebrowser + csi-smb-driver
csi-smb makes it possible to use SMB shares as Kubernetes volumes (volume claims etc.)

### Improvements
samba:
- LDAP integration
- metrics
- liveness probes
- resource limits

filebrowser:
- automatically change password
- LDAP integration

View File

@@ -0,0 +1,140 @@
# Velero backup Schedules for the fileserver namespace (same pattern as the
# bookstack schedules). Indentation restored to valid YAML; the copy-pasted
# "# every hour" comments corrected; cron strings quoted.
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: fileserver-backup-csi-hourly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 15-22 * * *"  # hourly, on the hour, from 15:00 to 22:00
  # If true, backups carry an OwnerReference to this Schedule and are
  # deleted together with it. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - fileserver
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 8h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: fileserver-backup-csi-daily
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 * * *"  # daily at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - fileserver
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: fileserver-backup-csi-weekly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 * * 1"  # weekly, Monday at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - fileserver
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: fileserver-backup-restic-daily
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 * * *"  # daily at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    # File-system backup instead of CSI snapshots for this location.
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - fileserver
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: fileserver-backup-restic-weekly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 * * 1"  # weekly, Monday at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - fileserver
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: fileserver-backup-restic-monthly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression defining when to run the Backup.
  schedule: "0 0 1 * *"  # monthly, 1st day at midnight
  # If true, backups are deleted together with this Schedule. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - fileserver
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 4380h0m0s

View File

@@ -0,0 +1,7 @@
# CSIDriver registration for the SMB CSI driver.
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: smb.csi.k8s.io
spec:
  # SMB mounts need no separate attach step.
  attachRequired: false
  # Pass pod metadata to the driver on mount.
  podInfoOnMount: true

View File

@@ -0,0 +1,108 @@
# SMB CSI controller (vendored from upstream csi-driver-smb, redeployed into
# the fileserver namespace). Indentation restored to valid YAML.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-smb-controller
  namespace: fileserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: csi-smb-controller
  template:
    metadata:
      labels:
        app: csi-smb-controller
    spec:
      dnsPolicy: Default  # available values: Default, ClusterFirstWithHostNet, ClusterFirst
      serviceAccountName: csi-smb-controller-sa
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      # Allow scheduling onto control-plane nodes (all historical taint names).
      tolerations:
        - key: "node-role.kubernetes.io/master"
          operator: "Exists"
          effect: "NoSchedule"
        - key: "node-role.kubernetes.io/controlplane"
          operator: "Exists"
          effect: "NoSchedule"
        - key: "node-role.kubernetes.io/control-plane"
          operator: "Exists"
          effect: "NoSchedule"
      containers:
        - name: csi-provisioner
          image: registry.k8s.io/sig-storage/csi-provisioner:v3.2.0
          args:
            - "-v=2"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election"
            - "--leader-election-namespace=kube-system"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
          resources:
            limits:
              cpu: 1
              memory: 300Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: liveness-probe
          image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
          args:
            - --csi-address=/csi/csi.sock
            - --probe-timeout=3s
            - --health-port=29642
            - --v=2
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          resources:
            limits:
              cpu: 1
              memory: 100Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: smb
          image: registry.k8s.io/sig-storage/smbplugin:v1.9.0
          imagePullPolicy: IfNotPresent
          args:
            - "--v=5"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metrics-address=0.0.0.0:29644"
          ports:
            - containerPort: 29642
              name: healthz
              protocol: TCP
            - containerPort: 29644
              name: metrics
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 30
            timeoutSeconds: 10
            periodSeconds: 30
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          securityContext:
            # Required by the SMB plugin for mount operations.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 10m
              memory: 20Mi
      volumes:
        # Shared socket between the plugin and its sidecars.
        - name: socket-dir
          emptyDir: {}

View File

@@ -0,0 +1,128 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-smb-node
namespace: fileserver
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: csi-smb-node
template:
metadata:
labels:
app: csi-smb-node
spec:
hostNetwork: true
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
serviceAccountName: csi-smb-node-sa
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
tolerations:
- operator: "Exists"
containers:
- name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29643
- --v=2
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=2
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/smb.csi.k8s.io/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: smb
image: registry.k8s.io/sig-storage/smbplugin:v1.9.0
imagePullPolicy: IfNotPresent
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
- "--metrics-address=0.0.0.0:29645"
ports:
- containerPort: 29643
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet/
mountPropagation: Bidirectional
name: mountpoint-dir
resources:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/smb.csi.k8s.io
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet/
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: DirectoryOrCreate
name: registration-dir

View File

@@ -0,0 +1,55 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-smb-controller-sa
namespace: fileserver
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-smb-node-sa
namespace: fileserver
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: smb-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: smb-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-smb-controller-sa
namespace: fileserver
roleRef:
kind: ClusterRole
name: smb-external-provisioner-role
apiGroup: rbac.authorization.k8s.io

122
fileserver/filebrowser.yaml Normal file
View File

@@ -0,0 +1,122 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: filebrowser
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: fileserver
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: fileserver-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
#apiVersion: v1
#kind: PersistentVolumeClaim
#metadata:
# name: filebrowser
# namespace: fileserver
#spec:
# accessModes:
# - ReadWriteMany
# resources:
# requests:
# storage: 2Gi
# storageClassName: filebrowser
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: filebrowser-db
namespace: fileserver
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 4Gi
storageClassName: filebrowser
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: filebrowser
  namespace: fileserver
  labels:
    app: filebrowser
spec:
  replicas: 1
  selector:
    matchLabels:
      app: filebrowser
  template:
    metadata:
      labels:
        app: filebrowser
    spec:
      # Pre-create an empty database file on the volume — presumably so the
      # subPath mount below binds an existing file instead of letting the
      # kubelet auto-create a directory at /database.db; TODO confirm.
      # touch is idempotent, so an existing database is left untouched.
      initContainers:
      - name: createfile
        image: debian  # NOTE(review): untagged (latest) and heavy for a single touch — pin a tag or use busybox
        command: ["bash", "-c", "touch /database/database.db && ls -la /database"]
        volumeMounts:
        - mountPath: "/database/"
          name: filebrowser-db
      containers:
      - name: filebrowser
        image: filebrowser/filebrowser  # NOTE(review): untagged — pin a version for reproducible deploys
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        # Files served by the web UI; this reuses the RWX "samba" PVC, so the
        # browser exposes the same data as the samba Deployment in this dir.
        - mountPath: "/srv"
          name: filebrowser
        # filebrowser's own settings/users database, backed by the PVC above.
        - mountPath: "/database.db"
          name: filebrowser-db
          subPath: database.db
      volumes:
      - name: filebrowser
        persistentVolumeClaim:
          claimName: samba
      - name: filebrowser-db
        persistentVolumeClaim:
          claimName: filebrowser-db
---
apiVersion: v1
kind: Service
metadata:
name: filebrowser
namespace: fileserver
labels:
app: filebrowser
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: filebrowser
sessionAffinity: None
type: ClusterIP

View File

@@ -0,0 +1,42 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: fileserver
namespace: rook-ceph
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPools:
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: false
metadataServer:
activeCount: 1
activeStandby: true
placement:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - mds-node
tolerations:
- key: node-role.kubernetes.io/storage-node
operator: Exists
effect: NoSchedule
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
#resources:
# limits:
# cpu: "80m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"

25
fileserver/ingress.yaml Normal file
View File

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: fileserver
namespace: fileserver
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts:
- fileserver.undercloud.cf
secretName: fileserver-tls
rules:
- host: fileserver.undercloud.cf
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: filebrowser
port:
number: 80

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: fileserver
labels:
prometheus: prometheus

119
fileserver/samba.yaml Normal file
View File

@@ -0,0 +1,119 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: samba
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: fileserver
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: fileserver-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: samba
namespace: fileserver
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 64Gi
storageClassName: samba
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: samba
  namespace: fileserver
  labels:
    app: samba
spec:
  replicas: 1
  selector:
    matchLabels:
      app: samba
  template:
    metadata:
      labels:
        app: samba
    spec:
      # Pre-create the share sub-directories on the CephFS-backed volume so
      # the SHARE2-4 definitions below always point at existing paths.
      initContainers:
      - name: createfolders
        image: dperson/samba
        command: ["bash", "-c", "mkdir -p /data/music && mkdir -p /data/movies && mkdir -p /data/tvshows"]
        volumeMounts:
        - mountPath: /data
          name: data
      containers:
      - name: samba
        image: dperson/samba
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 139  # NetBIOS session service
        - containerPort: 445  # SMB over TCP
        env:
        # dperson/samba share syntax: "<name>;<container path>[;options...]".
        - name: SHARE
          value: "data;/data"
        # BUGFIX: these shares previously pointed at /music, /movies and
        # /tvshows, which do not exist in this container — the only volume
        # mount is /data, and the init container creates the folders under
        # /data. Point the shares at the real paths.
        - name: SHARE2
          value: "music;/data/music"
        - name: SHARE3
          value: "movies;/data/movies"
        - name: SHARE4
          value: "tvshows;/data/tvshows"
        # NOTE(review): credentials hard-coded in the manifest; move them into
        # a Secret and inject via valueFrom/secretKeyRef.
        - name: USER
          value: "admin;4IsTheMindKiller"
        volumeMounts:
        - mountPath: "/data"
          name: data
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: samba
---
apiVersion: v1
kind: Service
metadata:
name: samba
namespace: fileserver
labels:
app: samba
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
ports:
- name: smb1
port: 139
protocol: TCP
targetPort: 139
- name: smb2
port: 445
protocol: TCP
targetPort: 445
selector:
app: samba
sessionAffinity: None
type: ClusterIP

20
forum/README.md Normal file
View File

@@ -0,0 +1,20 @@
# Forum
## phpbb
phpBB is a free flat-forum bulletin board software solution that can be used to stay in touch with a group of people or can power your entire website. With an extensive database of user-created extensions and styles database containing hundreds of style and image packages to customise your board, you can create a very unique forum in minutes.
Improvements:

- metrics (see <https://git.app.uib.no/caleno/helm-charts/-/blob/master/stable/phpbb/templates/deployment.yaml>)
- health probes (see <https://git.app.uib.no/caleno/helm-charts/-/blob/master/stable/phpbb/templates/deployment.yaml>)
- automate deployment
- change logo
- create boards and forums
- dark theme / cyberpunk theme
- SMTP setup
- create users
- disable registration
- enable LDAP
- resource limits

Metrics:

- there is a metrics exporter

140
forum/backupSchedule.yaml Normal file
View File

@@ -0,0 +1,140 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: forum-backup-csi-hourly
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 15-22 * * * # hourly, on the hour, from 15:00 to 22:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- forum
metadata: {}
storageLocation: ceph-bucket
ttl: 8h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: forum-backup-csi-daily
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 * * * # daily at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- forum
metadata: {}
storageLocation: ceph-bucket
ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: forum-backup-csi-weekly
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 * * 1 # weekly, Mondays at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- forum
metadata: {}
storageLocation: ceph-bucket
ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: forum-backup-restic-daily
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 * * * # daily at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- forum
metadata: {}
storageLocation: aux-balancer-minio
ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: forum-backup-restic-weekly
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 * * 1 # weekly, Mondays at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- forum
metadata: {}
storageLocation: aux-balancer-minio
ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: forum-backup-restic-monthly
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 1 * * # monthly, on the 1st at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- forum
metadata: {}
storageLocation: aux-balancer-minio
ttl: 4380h0m0s

216
forum/db.yaml Normal file
View File

@@ -0,0 +1,216 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: forum-db
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: forum
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: forum-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: db
namespace: forum
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 8Gi
storageClassName: forum-db
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: db
namespace: forum
labels:
app: db
spec:
replicas: 1
selector:
matchLabels:
app: db
template:
metadata:
labels:
app: db
spec:
containers:
- name: db
image: mariadb:10.5
imagePullPolicy: "IfNotPresent"
ports:
- name: mysql
containerPort: 3306
env:
- name: MARIADB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: forum-db
key: root.pw
- name: MARIADB_USER
valueFrom:
secretKeyRef:
name: forum-db
key: username
optional: false
- name: MARIADB_PASSWORD
valueFrom:
secretKeyRef:
name: forum-db
key: user.pw
optional: false
- name: MARIADB_DATABASE
value: phpbb
#livenessProbe:
# exec:
# command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
# initialDelaySeconds: 120
# periodSeconds: 10
# timeoutSeconds: 1
# successThreshold: 1
# failureThreshold: 3
#readinessProbe:
# exec:
# command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
# initialDelaySeconds: 30
# periodSeconds: 10
# timeoutSeconds: 1
# successThreshold: 1
# failureThreshold: 3
volumeMounts:
- mountPath: /var/lib/mysql
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: db
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
name: db
namespace: forum
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
ipFamilyPolicy: SingleStack
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: 3306
selector:
app: db
sessionAffinity: None
type: ClusterIP
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: forum-db-backup
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: forum
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: forum-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: db-backup
namespace: forum
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 8Gi
storageClassName: forum-db-backup
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: db-backup
namespace: forum
labels:
app: db-backup
spec:
replicas: 1
selector:
matchLabels:
app: db-backup
template:
metadata:
labels:
app: db-backup
spec:
containers:
- name: db-backup
image: rsprta/mariadb-backup
imagePullPolicy: "IfNotPresent"
env:
- name: CRON_TIMER
value: "@daily"
- name: MARIADB_HOST
value: db
- name: MARIADB_PASSWORD
valueFrom:
secretKeyRef:
name: forum-db
key: root.pw
- name: MARIADB_USER
value: root
- name: MARIADB_PORT
value: "3306"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
persistentVolumeClaim:
claimName: db-backup
readOnly: false

42
forum/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: forum
namespace: rook-ceph
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPools:
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: false
metadataServer:
activeCount: 1
activeStandby: true
placement:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - mds-node
tolerations:
- key: node-role.kubernetes.io/storage-node
operator: Exists
effect: NoSchedule
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
#resources:
# limits:
# cpu: "80m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"

25
forum/ingress.yaml Normal file
View File

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: forum
namespace: forum
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts:
- forum.undercloud.cf
secretName: forum-tls
rules:
- host: forum.undercloud.cf
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: phpbb
port:
number: 80

6
forum/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: forum
labels:
prometheus: prometheus

170
forum/phpbb.yaml Normal file
View File

@@ -0,0 +1,170 @@
# Pre-generated phpBB config.php, mounted over /bitnami/phpbb/config.php by
# the phpbb Deployment below. PHPBB_INSTALLED=true marks the board as already
# installed (presumably to skip the web installer — confirm).
apiVersion: v1
kind: ConfigMap
metadata:
  name: config
  namespace: forum
# NOTE(review): $dbpasswd is a plaintext database credential stored in a
# ConfigMap and must stay in sync with the user.pw key of the forum-db
# Secret; consider rendering config.php at startup from the Secret instead.
# (No comments added inside the block scalar below: every line of it is the
# literal config.php content handed to phpBB.)
data:
  config.php: |
    <?php
    // phpBB 3.3.x auto-generated configuration file
    // Do not change anything in this file!
    $dbms = 'phpbb\\db\\driver\\mysqli';
    $dbhost = 'db';
    $dbport = '3306';
    $dbname = 'phpbb';
    $dbuser = 'phpbb';
    $dbpasswd = 'phpbbUserDBPW';
    $table_prefix = 'phpbb_';
    $phpbb_adm_relative_path = 'adm/';
    $acm_type = 'phpbb\\cache\\driver\\file';
    @define('PHPBB_INSTALLED', true);
    @define('PHPBB_ENVIRONMENT', 'production');
    // @define('DEBUG_CONTAINER', true);
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: phpbb
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: forum
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: forum-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
namespace: forum
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 16Gi
storageClassName: phpbb
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: phpbb
namespace: forum
labels:
app: phpbb
spec:
replicas: 1
selector:
matchLabels:
app: phpbb
template:
metadata:
labels:
app: phpbb
spec:
securityContext:
# runAsUser: 1000
# runAsGroup: 1000
fsGroup: 1001
containers:
- name: phpbb
image: bitnami/phpbb:3.3.10
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
- containerPort: 8443
env:
- name: ALLOW_EMPTY_PASSWORD
value: "yes"
- name: PHPBB_DISABLE_SESSION_VALIDATION
value: "true"
- name: PHPBB_DATABASE_HOST
value: "db"
- name: PHPBB_DATABASE_PORT_NUMBER
value: "3306"
- name: PHPBB_USERNAME
value: "admin"
- name: PHPBB_PASSWORD
valueFrom:
secretKeyRef:
name: forum-phpbb-user
key: pw
optional: false
- name: PHPBB_DATABASE_USER
valueFrom:
secretKeyRef:
name: forum-db
key: username
optional: false
- name: PHPBB_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: forum-db
key: user.pw
optional: false
- name: PHPBB_DATABASE_NAME
value: "phpbb"
volumeMounts:
- mountPath: "/bitnami/phpbb"
name: data
- mountPath: /bitnami/phpbb/config.php
name: config
subPath: config.php
volumes:
- name: data
persistentVolumeClaim:
claimName: data
- name: config
configMap:
name: config
defaultMode: 0777
items:
- key: "config.php"
path: "config.php"
---
apiVersion: v1
kind: Service
metadata:
name: phpbb
namespace: forum
labels:
app: phpbb
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
- name: https
port: 443
protocol: TCP
targetPort: 8443
selector:
app: phpbb
sessionAffinity: None
type: ClusterIP

19
forum/secrets.yaml Normal file
View File

@@ -0,0 +1,19 @@
# Credentials consumed by the forum Deployments in this directory.
# NOTE(review): base64 is an encoding, not encryption — these values are
# effectively plaintext in version control. Consider SealedSecrets/SOPS.
apiVersion: v1
kind: Secret
metadata:
  name: forum-db
  namespace: forum
type: Opaque
data:
  root.pw: cGhwYmJSb290REJQVw==  # MariaDB root password (db and db-backup Deployments)
  username: cGhwYmI=  # application DB user (db and phpbb Deployments)
  user.pw: cGhwYmJVc2VyREJQVw==  # application DB user's password
---
# Admin login for the phpBB web UI (PHPBB_PASSWORD in the phpbb Deployment).
apiVersion: v1
kind: Secret
metadata:
  name: forum-phpbb-user
  namespace: forum
type: Opaque
data:
  pw: NElzVGhlTWluZEtpbGxlcg==

BIN
guacamole/.DS_Store vendored Normal file

Binary file not shown.

9
guacamole/README.md Normal file
View File

@@ -0,0 +1,9 @@
# Guacamole
## rdp, vnc and ssh client
Improvements:

- LDAPS or StartTLS
- define connections in LDAP
- metrics
- liveness probes
- resource limits

View File

@@ -0,0 +1,140 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: guacamole-backup-csi-hourly
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 15-22 * * * # hourly, on the hour, from 15:00 to 22:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- guacamole
metadata: {}
storageLocation: ceph-bucket
ttl: 8h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: guacamole-backup-csi-daily
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 * * * # daily at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- guacamole
metadata: {}
storageLocation: ceph-bucket
ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: guacamole-backup-csi-weekly
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 * * 1 # weekly, Mondays at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- guacamole
metadata: {}
storageLocation: ceph-bucket
ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: guacamole-backup-restic-daily
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 * * * # daily at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- guacamole
metadata: {}
storageLocation: aux-balancer-minio
ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: guacamole-backup-restic-weekly
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 * * 1 # weekly, Mondays at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- guacamole
metadata: {}
storageLocation: aux-balancer-minio
ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: guacamole-backup-restic-monthly
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
  schedule: 0 0 1 * * # monthly, on the 1st at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- guacamole
metadata: {}
storageLocation: aux-balancer-minio
ttl: 4380h0m0s

42
guacamole/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: guacamole
namespace: rook-ceph
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPools:
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: false
metadataServer:
activeCount: 1
activeStandby: true
placement:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - mds-node
tolerations:
- key: node-role.kubernetes.io/storage-node
operator: Exists
effect: NoSchedule
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
#resources:
# limits:
# cpu: "80m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"

139
guacamole/guacamole.yaml Normal file
View File

@@ -0,0 +1,139 @@
# Guacamole runtime configuration; mounted over
# /config/guacamole/guacamole.properties by the guacamole Deployment below.
apiVersion: v1
kind: ConfigMap
metadata:
  name: configmap
  namespace: guacamole
# NOTE(review): the postgresql-* keys point at localhost and the password is
# the literal string "null" — presumably leftover defaults, since the
# Deployment only enables the auth-ldap extension; confirm and prune.
# NOTE(review): ldap-search-bind-password is a plaintext credential in a
# ConfigMap — it belongs in a Secret.
# (No comments added inside the block scalar below: every line of it becomes
# part of the generated guacamole.properties file.)
data:
  guacamole.properties: |
    postgresql-hostname: localhost
    postgresql-port: 5432
    postgresql-database: guacamole_db
    postgresql-username: guacamole
    postgresql-password: null
    ldap-hostname: ldap.undercloud.cf.
    ldap-port: 389
    ldap-encryption-method: none
    ldap-search-bind-dn: cn=guacamole,ou=serviceaccounts,ou=users,dc=undercloud,dc=cf
    ldap-search-bind-password: secureGuacamolePW
    ldap-user-base-dn: ou=users,dc=undercloud,dc=cf
    ldap-username-attribute: uid
    ldap-member-attribute: uniquemember
    ldap-group-base-dn: ou=groups,dc=undercloud,dc=cf
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: guacamole
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: guacamole
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: guacamole-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
# Persistent storage for Guacamole's /config directory (mounted by the
# Deployment below).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: config
  namespace: guacamole
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      # NOTE(review): "1G" is a decimal gigabyte; every other PVC in this
      # repo uses binary "Gi" units — presumably "1Gi" was intended. Confirm.
      storage: 1G
  storageClassName: guacamole
---
# Guacamole web frontend (single replica). guacamole.properties is projected
# from the "configmap" ConfigMap on top of the CephFS /config volume via
# subPath, so the rest of /config stays writable by the app.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: guacamole
  namespace: guacamole
  labels:
    app: guacamole
spec:
  replicas: 1
  selector:
    matchLabels:
      app: guacamole
  template:
    metadata:
      labels:
        app: guacamole
    spec:
      # ndots:1 avoids the k8s resolver default (ndots:5) so external FQDNs
      # resolve without first cycling through the cluster search domains.
      dnsConfig:
        options:
          - name: ndots
            value: "1"
      #securityContext:
      #  runAsUser: 1000
      #  runAsGroup: 1000
      #  fsGroup: 1000
      containers:
        - name: guacamole
          # NOTE(review): no image tag pinned (implicit :latest) while pull
          # policy is IfNotPresent — nodes can end up on different versions.
          # Consider pinning a tag.
          image: flcontainers/guacamole
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8080
          env:
            # extra Guacamole extensions to enable inside the container
            - name: EXTENSIONS
              value: "auth-ldap"
          volumeMounts:
            - mountPath: /config
              name: config
            - mountPath: /config/guacamole/guacamole.properties
              name: configmap
              subPath: guacamole.properties
      volumes:
        - name: config
          persistentVolumeClaim:
            claimName: config
            readOnly: false
        - name: configmap
          configMap:
            name: configmap
            # 0444 instead of 0777: the properties file contains an LDAP bind
            # password and is only ever read — it needs no write/execute bits.
            defaultMode: 0444
            items:
              - key: "guacamole.properties"
                path: "guacamole.properties"
---
# ClusterIP service exposing the guacamole web UI (container port 8080) on
# port 80; consumed by the guacamole Ingress.
apiVersion: v1
kind: Service
metadata:
  name: guacamole
  namespace: guacamole
spec:
  # dual-stack, IPv6 listed first (preferred family)
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: PreferDualStack
  #ipFamilyPolicy: SingleStack
  type: ClusterIP
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
  selector:
    app: guacamole
  sessionAffinity: None

25
guacamole/ingress.yaml Normal file
View File

@@ -0,0 +1,25 @@
# HTTPS ingress for guacamole; TLS certificate is issued by cert-manager
# into the guacamole-tls secret.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: guacamole
  namespace: guacamole
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  # spec.ingressClassName replaces the kubernetes.io/ingress.class
  # annotation, which has been deprecated since Kubernetes 1.18.
  ingressClassName: nginx
  tls:
    - hosts:
        - guacamole.undercloud.cf
      secretName: guacamole-tls
  rules:
    - host: guacamole.undercloud.cf
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: guacamole
                port:
                  number: 80

4
guacamole/namespace.yaml Normal file
View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: guacamole

7
jellyfin/README.md Normal file
View File

@@ -0,0 +1,7 @@
# Jellyfin
## Film and Music streaming

Possible improvements:
- metrics
- liveness probes
- resource limits

View File

@@ -0,0 +1,140 @@
# Velero backup schedules for the jellyfin namespace: CSI snapshots to the
# ceph-bucket location (hourly/daily/weekly) and file-system (restic/kopia)
# backups to the off-cluster aux-balancer-minio location (daily/weekly/monthly).
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: jellyfin-backup-csi-hourly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression: on the hour, every hour from 15:00 to 22:00
  schedule: "0 15-22 * * *"
  # Specifies whether to use OwnerReferences on backups created by this Schedule.
  # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      # was "demo" — copy/paste leftover; these schedules back up jellyfin
      - jellyfin
    metadata: {}
    storageLocation: ceph-bucket
    # keep hourly snapshots for 8 hours
    ttl: 8h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: jellyfin-backup-csi-daily
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression: daily at midnight
  schedule: "0 0 * * *"
  # Specifies whether to use OwnerReferences on backups created by this Schedule.
  # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      # was "demo" — copy/paste leftover; these schedules back up jellyfin
      - jellyfin
    metadata: {}
    storageLocation: ceph-bucket
    # keep daily snapshots for 7 days
    ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: jellyfin-backup-csi-weekly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron expression: weekly, Monday at midnight
  schedule: "0 0 * * 1"
  # Specifies whether to use OwnerReferences on backups created by this Schedule.
  # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      # was "demo" — copy/paste leftover; these schedules back up jellyfin
      - jellyfin
    metadata: {}
    storageLocation: ceph-bucket
    # keep weekly snapshots for ~30 days
    ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: jellyfin-backup-restic-daily
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression: daily at midnight
  schedule: "0 0 * * *"
  # Specifies whether to use OwnerReferences on backups created by this Schedule.
  # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    # file-system backup instead of CSI snapshots for this off-cluster target
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      # was "demo" — copy/paste leftover; these schedules back up jellyfin
      - jellyfin
    metadata: {}
    storageLocation: aux-balancer-minio
    # keep daily backups for 7 days
    ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: jellyfin-backup-restic-weekly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression: weekly, Monday at midnight
  schedule: "0 0 * * 1"
  # Specifies whether to use OwnerReferences on backups created by this Schedule.
  # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      # was "demo" — copy/paste leftover; these schedules back up jellyfin
      - jellyfin
    metadata: {}
    storageLocation: aux-balancer-minio
    # keep weekly backups for ~30 days
    ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: jellyfin-backup-restic-monthly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron expression: monthly, on the 1st at midnight
  schedule: "0 0 1 * *"
  # Specifies whether to use OwnerReferences on backups created by this Schedule.
  # Notice: if set to true, when schedule is deleted, backups will be deleted too. Optional.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      # was "demo" — copy/paste leftover; these schedules back up jellyfin
      - jellyfin
    metadata: {}
    storageLocation: aux-balancer-minio
    # keep monthly backups for ~6 months
    ttl: 4380h0m0s

42
jellyfin/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: jellyfin
namespace: rook-ceph
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPools:
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: false
metadataServer:
activeCount: 1
activeStandby: true
placement:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - mds-node
tolerations:
- key: node-role.kubernetes.io/storage-node
operator: Exists
effect: NoSchedule
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
#resources:
# limits:
# cpu: "80m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"

25
jellyfin/ingress.yaml Normal file
View File

@@ -0,0 +1,25 @@
# HTTPS ingress for jellyfin; TLS certificate is issued by cert-manager
# into the jellyfin-tls secret.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jellyfin
  namespace: jellyfin
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  # spec.ingressClassName replaces the kubernetes.io/ingress.class
  # annotation, which has been deprecated since Kubernetes 1.18.
  ingressClassName: nginx
  tls:
    - hosts:
        - jellyfin.undercloud.cf
      secretName: jellyfin-tls
  rules:
    - host: jellyfin.undercloud.cf
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: jellyfin
                port:
                  number: 80

435
jellyfin/jellyfin.yaml Normal file
View File

@@ -0,0 +1,435 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: config
namespace: jellyfin
data:
# file-like keys
system.xml: |
<?xml version="1.0" encoding="utf-8"?>
<ServerConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<LogFileRetentionDays>3</LogFileRetentionDays>
<IsStartupWizardCompleted>false</IsStartupWizardCompleted>
<EnableMetrics>true</EnableMetrics>
<EnableNormalizedItemByNameIds>true</EnableNormalizedItemByNameIds>
<IsPortAuthorized>true</IsPortAuthorized>
<QuickConnectAvailable>true</QuickConnectAvailable>
<EnableCaseSensitiveItemIds>true</EnableCaseSensitiveItemIds>
<DisableLiveTvChannelUserDataName>true</DisableLiveTvChannelUserDataName>
<MetadataPath />
<MetadataNetworkPath />
<PreferredMetadataLanguage>en</PreferredMetadataLanguage>
<MetadataCountryCode>US</MetadataCountryCode>
<SortReplaceCharacters>
<string>.</string>
<string>+</string>
<string>%</string>
</SortReplaceCharacters>
<SortRemoveCharacters>
<string>,</string>
<string>&amp;</string>
<string>-</string>
<string>{</string>
<string>}</string>
<string>'</string>
</SortRemoveCharacters>
<SortRemoveWords>
<string>the</string>
<string>a</string>
<string>an</string>
</SortRemoveWords>
<MinResumePct>5</MinResumePct>
<MaxResumePct>90</MaxResumePct>
<MinResumeDurationSeconds>300</MinResumeDurationSeconds>
<MinAudiobookResume>5</MinAudiobookResume>
<MaxAudiobookResume>5</MaxAudiobookResume>
<LibraryMonitorDelay>60</LibraryMonitorDelay>
<ImageSavingConvention>Legacy</ImageSavingConvention>
<MetadataOptions>
<MetadataOptions>
<ItemType>Book</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers />
<MetadataFetcherOrder />
<DisabledImageFetchers />
<ImageFetcherOrder />
</MetadataOptions>
<MetadataOptions>
<ItemType>Movie</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers />
<MetadataFetcherOrder />
<DisabledImageFetchers />
<ImageFetcherOrder />
</MetadataOptions>
<MetadataOptions>
<ItemType>MusicVideo</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers>
<string>The Open Movie Database</string>
</DisabledMetadataFetchers>
<MetadataFetcherOrder />
<DisabledImageFetchers>
<string>The Open Movie Database</string>
</DisabledImageFetchers>
<ImageFetcherOrder />
</MetadataOptions>
<MetadataOptions>
<ItemType>Series</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers />
<MetadataFetcherOrder />
<DisabledImageFetchers />
<ImageFetcherOrder />
</MetadataOptions>
<MetadataOptions>
<ItemType>MusicAlbum</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers>
<string>TheAudioDB</string>
</DisabledMetadataFetchers>
<MetadataFetcherOrder />
<DisabledImageFetchers />
<ImageFetcherOrder />
</MetadataOptions>
<MetadataOptions>
<ItemType>MusicArtist</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers>
<string>TheAudioDB</string>
</DisabledMetadataFetchers>
<MetadataFetcherOrder />
<DisabledImageFetchers />
<ImageFetcherOrder />
</MetadataOptions>
<MetadataOptions>
<ItemType>BoxSet</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers />
<MetadataFetcherOrder />
<DisabledImageFetchers />
<ImageFetcherOrder />
</MetadataOptions>
<MetadataOptions>
<ItemType>Season</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers />
<MetadataFetcherOrder />
<DisabledImageFetchers />
<ImageFetcherOrder />
</MetadataOptions>
<MetadataOptions>
<ItemType>Episode</ItemType>
<DisabledMetadataSavers />
<LocalMetadataReaderOrder />
<DisabledMetadataFetchers />
<MetadataFetcherOrder />
<DisabledImageFetchers />
<ImageFetcherOrder />
</MetadataOptions>
</MetadataOptions>
<SkipDeserializationForBasicTypes>true</SkipDeserializationForBasicTypes>
<ServerName />
<UICulture>en-US</UICulture>
<SaveMetadataHidden>false</SaveMetadataHidden>
<ContentTypes />
<RemoteClientBitrateLimit>0</RemoteClientBitrateLimit>
<EnableFolderView>false</EnableFolderView>
<EnableGroupingIntoCollections>false</EnableGroupingIntoCollections>
<DisplaySpecialsWithinSeasons>true</DisplaySpecialsWithinSeasons>
<CodecsUsed />
<PluginRepositories>
<RepositoryInfo>
<Name>Jellyfin Stable</Name>
<Url>https://repo.jellyfin.org/releases/plugin/manifest-stable.json</Url>
<Enabled>true</Enabled>
</RepositoryInfo>
</PluginRepositories>
<EnableExternalContentInSuggestions>true</EnableExternalContentInSuggestions>
<ImageExtractionTimeoutMs>0</ImageExtractionTimeoutMs>
<PathSubstitutions />
<EnableSlowResponseWarning>true</EnableSlowResponseWarning>
<SlowResponseThresholdMs>500</SlowResponseThresholdMs>
<CorsHosts>
<string>*</string>
</CorsHosts>
<ActivityLogRetentionDays>30</ActivityLogRetentionDays>
<LibraryScanFanoutConcurrency>0</LibraryScanFanoutConcurrency>
<LibraryMetadataRefreshConcurrency>0</LibraryMetadataRefreshConcurrency>
<RemoveOldPlugins>false</RemoveOldPlugins>
<AllowClientLogUpload>true</AllowClientLogUpload>
</ServerConfiguration>
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: jellyfin
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: jellyfin
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: jellyfin-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: config
namespace: jellyfin
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
storageClassName: jellyfin
---
# Statically provisioned SMB (CIFS) share "music" from the in-cluster
# fileserver, mounted through the SMB CSI driver. The movies and tvshows
# PV/PVC pairs below follow the same pattern.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: music-jelly
  # NOTE(review): PersistentVolumes are cluster-scoped; this namespace field
  # is ignored by the API server and can be removed.
  namespace: jellyfin
spec:
  # "" disables dynamic provisioning — the claim below binds by volumeName
  storageClassName: ""
  capacity:
    storage: 32Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - dir_mode=0777
    - file_mode=0777
    - vers=3.0
  csi:
    driver: smb.csi.k8s.io
    readOnly: false
    volumeHandle: "music" # make sure it's a unique id in the cluster
    volumeAttributes:
      source: "//samba.fileserver.svc.k8aux.undercloud.cf./music"
    # SMB credentials; see jellyfin/secrets.yaml (fileserver-smb-account)
    nodeStageSecretRef:
      name: fileserver-smb-account
      namespace: jellyfin
---
# Claim pinned to the music-jelly PV above (storageClassName "" plus
# volumeName gives a static one-to-one binding).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: music
  namespace: jellyfin
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      # request (30Gi) must be <= the PV's declared capacity (32Gi)
      storage: 30Gi
  volumeName: music-jelly
  storageClassName: ""
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: movies
namespace: jellyfin
spec:
storageClassName: ""
capacity:
storage: 32Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- dir_mode=0777
- file_mode=0777
- vers=3.0
csi:
driver: smb.csi.k8s.io
readOnly: false
volumeHandle: "movies" # make sure it's a unique id in the cluster
volumeAttributes:
source: "//samba.fileserver.svc.k8aux.undercloud.cf./movies"
nodeStageSecretRef:
name: fileserver-smb-account
namespace: jellyfin
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: tvshows
namespace: jellyfin
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 30Gi
volumeName: tvshows
storageClassName: ""
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: tvshows
namespace: jellyfin
spec:
storageClassName: ""
capacity:
storage: 32Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- dir_mode=0777
- file_mode=0777
- vers=3.0
csi:
driver: smb.csi.k8s.io
readOnly: false
volumeHandle: "tvshows" # make sure it's a unique id in the cluster
volumeAttributes:
source: "//samba.fileserver.svc.k8aux.undercloud.cf./tvshows"
nodeStageSecretRef:
name: fileserver-smb-account
namespace: jellyfin
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: movies
namespace: jellyfin
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 30Gi
volumeName: movies
storageClassName: ""
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin
namespace: jellyfin
labels:
app: jellyfin
spec:
replicas: 1
selector:
matchLabels:
app: jellyfin
template:
metadata:
labels:
app: jellyfin
spec:
dnsConfig:
options:
- name: ndots
value: "1"
#securityContext:
# runAsUser: 1000
# runAsGroup: 1000
# fsGroup: 1000
containers:
- name: jellyfin
image: linuxserver/jellyfin
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8096
- containerPort: 8920
- containerPort: 7359 #udp
- containerPort: 1900 #udp
#livenessProbe:
# httpGet:
# path: /health
# port: 8096
# #httpHeaders:
# #- name: Custom-Header
# # value: Awesome
# initialDelaySeconds: 30
# periodSeconds: 5
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
volumeMounts:
- mountPath: /config
name: config
- mountPath: /data/movies
name: movies
#- mountPath: "/config/system.xml"
# name: config-cm
# subPath: system.xml
volumes:
- name: config
persistentVolumeClaim:
claimName: config
readOnly: false
- name: movies
persistentVolumeClaim:
claimName: movies
readOnly: false
- name: config-cm
configMap:
name: config
items:
- key: "system.xml"
path: "system.xml"
---
# ClusterIP service for jellyfin: web UI/API on 80 -> 8096, DLNA HTTPS on
# 8920, and the UDP client-discovery ports 7359 / 1900.
apiVersion: v1
kind: Service
metadata:
  name: jellyfin
  namespace: jellyfin
  # Label added so the ServiceMonitor's selector (matchLabels app: jellyfin)
  # actually matches this Service; previously the Service carried no labels
  # and the monitor selected nothing.
  labels:
    app: jellyfin
spec:
  # dual-stack, IPv6 listed first (preferred family)
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: PreferDualStack
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8096
    - name: jelly1
      port: 8920
      protocol: TCP
      targetPort: 8920
    - name: jelly2
      port: 7359
      protocol: UDP
      targetPort: 7359
    - name: jelly3
      port: 1900
      protocol: UDP
      targetPort: 1900
  selector:
    app: jellyfin
  sessionAffinity: None
  type: ClusterIP

6
jellyfin/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: jellyfin
labels:
prometheus: prometheus

10
jellyfin/secrets.yaml Normal file
View File

@@ -0,0 +1,10 @@
# SMB credentials consumed by the nodeStageSecretRef of the music/movies/
# tvshows PersistentVolumes in jellyfin/jellyfin.yaml.
# NOTE(review): base64 is encoding, not encryption — these credentials are
# committed to the repo in recoverable form. Consider SealedSecrets or an
# external secret store.
apiVersion: v1
kind: Secret
metadata:
  name: fileserver-smb-account
  namespace: jellyfin
type: Opaque
data:
  username: YWRtaW4=
  password: NElzVGhlTWluZEtpbGxlcg==
  domain: bG9jYWxob3N0

View File

@@ -0,0 +1,18 @@
# Prometheus Operator ServiceMonitor scraping the jellyfin Service.
# Metrics are enabled in the server config (<EnableMetrics>true</EnableMetrics>
# in system.xml above).
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: jellyfin
  namespace: jellyfin
  labels:
    # must match the Prometheus instance's serviceMonitorSelector
    team: undercloud
spec:
  #namespaceSelector:
  #  matchNames:
  #    - argocd-metrics
  selector:
    # NOTE(review): this matches *Service* labels, and the jellyfin Service
    # in jellyfin.yaml declares no labels — verify it carries app: jellyfin
    # or this monitor selects nothing.
    matchLabels:
      app: jellyfin
  endpoints:
    # "http" must match the port *name* on the Service
    - port: http
      #path: /metrics
      # 5s is an aggressive scrape interval (Prometheus default is 30s)
      interval: 5s

10
jitsi/README.md Normal file
View File

@@ -0,0 +1,10 @@
# Jitsi
## Video Conferencing / Telephony
Jitsi ist eine Sammlung freier Software für IP-Telefonie, Videokonferenzen und Instant Messaging.

Possible improvements:
- LDAP auth
- metrics
- liveness probes
- resource limits

42
jitsi/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: jitsi
namespace: rook-ceph
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPools:
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: false
metadataServer:
activeCount: 1
activeStandby: true
placement:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - mds-node
tolerations:
- key: node-role.kubernetes.io/storage-node
operator: Exists
effect: NoSchedule
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
#resources:
# limits:
# cpu: "80m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"

25
jitsi/ingress.yaml Normal file
View File

@@ -0,0 +1,25 @@
# HTTPS ingress for jitsi, routing to the "frontend" (jitsi/web) Service;
# TLS certificate is issued by cert-manager into the jitsi-tls secret.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jitsi
  namespace: jitsi
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  # spec.ingressClassName replaces the kubernetes.io/ingress.class
  # annotation, which has been deprecated since Kubernetes 1.18.
  ingressClassName: nginx
  tls:
    - hosts:
        - jitsi.undercloud.cf
      secretName: jitsi-tls
  rules:
    - host: jitsi.undercloud.cf
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: frontend
                port:
                  number: 80

631
jitsi/jitsi.yaml Normal file
View File

@@ -0,0 +1,631 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: jitsi
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: jitsi
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: jitsi-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
namespace: jitsi
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 512Mi
storageClassName: jitsi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend
namespace: jitsi
labels:
app: frontend
spec:
replicas: 1
selector:
matchLabels:
app: frontend
template:
metadata:
annotations:
#backup.velero.io/backup-volumes: html
labels:
app: frontend
spec:
dnsConfig:
options:
- name: ndots
value: "1"
#securityContext:
# runAsUser: 1000
# runAsGroup: 1000
# fsGroup: 1000
containers:
- name: frontend
resources:
#requests:
# memory: "10Mi"
# cpu: "250m"
#limits:
# memory: "256Mi"
# cpu: "10m"
image: jitsi/web:web-1.0.7257-1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
- containerPort: 443
env:
- name: TZ
value: "Europe/Berlin"
- name: PUBLIC_URL
value: "https://jitsi.undercloud.cf"
- name: JICOFO_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JICOFO_AUTH_PASSWORD
- name: JVB_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JVB_AUTH_PASSWORD
- name: JIGASI_XMPP_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIGASI_XMPP_PASSWORD
- name: JIBRI_RECORDER_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIBRI_RECORDER_PASSWORD
- name: JIBRI_XMPP_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIBRI_XMPP_PASSWORD
- name: ENABLE_LETSENCRYPT
value: "0"
- name: XMPP_DOMAIN
value: "xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_BOSH_URL_BASE
value: "http://xmpp.jitsi.svc.k8aux.undercloud.cf:5280"
- name: XMPP_PORT
value: "5222"
- name: XMPP_SERVER
value: "xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: JVB_ADVERTISE_IPS
value: "2001:470:72f0:2::31,10.0.2.31"
- name: DEFAULT_LANGUAGE
value: "de"
- name: XMPP_AUTH_DOMAIN
value: "auth.xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_MUC_DOMAIN
value: "muc.xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_INTERNAL_MUC_DOMAIN
value: "internal-muc.xmpp.jitsi.svc.k8aux.undercloud.cf"
#- name: PUID
# value: "1000"
#- name: PGID
# value: "1000"
#lifecycle:
# postStart:
# exec:
# command: ["/bin/sh", "-c", "cp -rf /opt/bastillion/jetty/bastillion/WEB-INF/classe/BastillionConfig.properties.tmp /opt/bastillion/jetty/bastillion/WEB-INF/classe/BastillionConfig.properties"]
volumeMounts:
- mountPath: /config
name: data
subPath: config
- mountPath: /var/spool/cron/crontabs
name: data
subPath: crontabs
- mountPath: /usr/share/jitsi-meet/transcripts
name: data
subPath: transcripts
volumes:
- name: data
persistentVolumeClaim:
claimName: data
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
name: frontend
namespace: jitsi
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
#ipFamilyPolicy: SingleStack
ports:
- name: http
port: 80
targetPort: 80
- name: https
port: 443
targetPort: 443
selector:
app: frontend
#sessionAffinity: None
type: ClusterIP
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: xmpp-data
namespace: jitsi
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 512Mi
storageClassName: jitsi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: xmpp
namespace: jitsi
labels:
app: xmpp
spec:
replicas: 1
selector:
matchLabels:
app: xmpp
template:
metadata:
annotations:
#backup.velero.io/backup-volumes: html
labels:
app: xmpp
spec:
dnsConfig:
options:
- name: ndots
value: "1"
#securityContext:
# runAsUser: 1000
# runAsGroup: 1000
# fsGroup: 1000
containers:
- name: xmpp
resources:
#requests:
# memory: "10Mi"
# cpu: "250m"
#limits:
# memory: "256Mi"
# cpu: "10m"
image: jitsi/prosody:prosody-0.12.3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 5222
- containerPort: 5347
- containerPort: 5280
env:
- name: TZ
value: "Europe/Berlin"
- name: PUBLIC_URL
value: "https://jitsi.undercloud.cf"
- name: JICOFO_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JICOFO_AUTH_PASSWORD
- name: JVB_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JVB_AUTH_PASSWORD
- name: JIGASI_XMPP_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIGASI_XMPP_PASSWORD
- name: JIBRI_RECORDER_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIBRI_RECORDER_PASSWORD
- name: JIBRI_XMPP_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIBRI_XMPP_PASSWORD
- name: ENABLE_LETSENCRYPT
value: "0"
- name: ENABLE_IPV6
value: "1"
- name: XMPP_DOMAIN
value: "xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_BOSH_URL_BASE
value: "http://xmpp.jitsi.svc.k8aux.undercloud.cf:5280"
- name: XMPP_PORT
value: "5222"
- name: XMPP_SERVER
value: "xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: JVB_ADVERTISE_IPS
value: "2001:470:72f0:2::31,10.0.2.31"
- name: XMPP_AUTH_DOMAIN
value: "auth.xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_MUC_DOMAIN
value: "muc.xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_INTERNAL_MUC_DOMAIN
value: "internal-muc.xmpp.jitsi.svc.k8aux.undercloud.cf"
#- RESET_APPLICATION_SSH_KEY=false # set to true to regenerate and import SSH keys
#- SSH_KEY_TYPE=rsa # SSH key type 'dsa', 'rsa', or 'ecdsa' for generated keys
#- SSH_KEY_LENGTH=2048 # SSH key length for generated keys 2048 => 'rsa','dsa'; 521 => 'ecdsa'
#- name: PUID
# value: "1000"
#- name: PGID
# value: "1000"
#lifecycle:
# postStart:
# exec:
# command: ["/bin/sh", "-c", "cp -rf /opt/bastillion/jetty/bastillion/WEB-INF/classe/BastillionConfig.properties.tmp /opt/bastillion/jetty/bastillion/WEB-INF/classe/BastillionConfig.properties"]
volumeMounts:
- mountPath: /config
name: data
subPath: config
- mountPath: /prosody-plugins-custom
name: data
subPath: plugins
volumes:
- name: data
persistentVolumeClaim:
claimName: xmpp-data
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
name: xmpp
namespace: jitsi
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
#ipFamilyPolicy: SingleStack
ports:
- name: xmpp1
port: 5222
targetPort: 5222
- name: xmpp2
port: 5347
targetPort: 5347
- name: xmpp3
port: 5280
targetPort: 5280
selector:
app: xmpp
#sessionAffinity: None
type: ClusterIP
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: focus-data
namespace: jitsi
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 512Mi
storageClassName: jitsi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: focus
namespace: jitsi
labels:
app: focus
spec:
replicas: 1
selector:
matchLabels:
app: focus
template:
metadata:
annotations:
#backup.velero.io/backup-volumes: html
labels:
app: focus
spec:
dnsConfig:
options:
- name: ndots
value: "1"
#securityContext:
# runAsUser: 1000
# runAsGroup: 1000
# fsGroup: 1000
containers:
- name: focus
resources:
#requests:
# memory: "10Mi"
# cpu: "250m"
#limits:
# memory: "256Mi"
# cpu: "10m"
image: jitsi/jicofo:jicofo-1.0-1029-1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8888
env:
- name: JICOFO_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JICOFO_AUTH_PASSWORD
- name: JVB_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JVB_AUTH_PASSWORD
- name: JIGASI_XMPP_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIGASI_XMPP_PASSWORD
- name: JIBRI_RECORDER_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIBRI_RECORDER_PASSWORD
- name: JIBRI_XMPP_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIBRI_XMPP_PASSWORD
- name: ENABLE_LETSENCRYPT
value: "0"
- name: ENABLE_IPV6
value: "1"
- name: XMPP_DOMAIN
value: "xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_BOSH_URL_BASE
value: "http://xmpp.jitsi.svc.k8aux.undercloud.cf:5280"
- name: XMPP_PORT
value: "5222"
- name: XMPP_SERVER
value: "xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: JVB_ADVERTISE_IPS
value: "2001:470:72f0:2::31,10.0.2.31"
- name: XMPP_AUTH_DOMAIN
value: "auth.xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_MUC_DOMAIN
value: "muc.xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_INTERNAL_MUC_DOMAIN
value: "internal-muc.xmpp.jitsi.svc.k8aux.undercloud.cf"
#- RESET_APPLICATION_SSH_KEY=false # set to true to regenerate and import SSH keys
#- SSH_KEY_TYPE=rsa # SSH key type 'dsa', 'rsa', or 'ecdsa' for generated keys
#- SSH_KEY_LENGTH=2048 # SSH key length for generated keys 2048 => 'rsa','dsa'; 521 => 'ecdsa'
#- name: PUID
# value: "1000"
#- name: PGID
# value: "1000"
#lifecycle:
# postStart:
# exec:
# command: ["/bin/sh", "-c", "cp -rf /opt/bastillion/jetty/bastillion/WEB-INF/classe/BastillionConfig.properties.tmp /opt/bastillion/jetty/bastillion/WEB-INF/classe/BastillionConfig.properties"]
volumeMounts:
- mountPath: /config
name: data
subPath: config
volumes:
- name: data
persistentVolumeClaim:
claimName: focus-data
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
name: focus
namespace: jitsi
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
#ipFamilyPolicy: SingleStack
ports:
- name: focus
port: 8888
targetPort: 8888
selector:
app: focus
#sessionAffinity: None
type: ClusterIP
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jvb-data
namespace: jitsi
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 512Mi
storageClassName: jitsi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: jvb
namespace: jitsi
labels:
app: jvb
spec:
replicas: 1
selector:
matchLabels:
app: jvb
template:
metadata:
annotations:
#backup.velero.io/backup-volumes: html
labels:
app: jvb
spec:
dnsConfig:
options:
- name: ndots
value: "1"
#securityContext:
# runAsUser: 1000
# runAsGroup: 1000
# fsGroup: 1000
containers:
- name: jvb
resources:
#requests:
# memory: "10Mi"
# cpu: "250m"
#limits:
# memory: "256Mi"
# cpu: "10m"
image: jitsi/jvb:jvb-2.3-20-gfc17337e-1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 10000
- containerPort: 8080
env:
- name: TZ
value: "Europe/Berlin"
- name: PUBLIC_URL
value: "https://jitsi.undercloud.cf"
- name: JICOFO_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JICOFO_AUTH_PASSWORD
- name: JVB_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JVB_AUTH_PASSWORD
- name: JIGASI_XMPP_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIGASI_XMPP_PASSWORD
- name: JIBRI_RECORDER_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIBRI_RECORDER_PASSWORD
- name: JIBRI_XMPP_PASSWORD
valueFrom:
secretKeyRef:
name: jitsi
key: JIBRI_XMPP_PASSWORD
- name: ENABLE_LETSENCRYPT
value: "0"
- name: ENABLE_IPV6
value: "1"
- name: JVB_PORT
value: "10000"
- name: XMPP_DOMAIN
value: "xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_BOSH_URL_BASE
value: "http://xmpp.jitsi.svc.k8aux.undercloud.cf:5280"
- name: XMPP_PORT
value: "5222"
- name: XMPP_SERVER
value: "xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: JVB_ADVERTISE_IPS
value: "2001:470:72f0:2::31,10.0.2.31"
- name: JVB_DISABLE_STUN
value: "1"
- name: XMPP_AUTH_DOMAIN
value: "auth.xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_MUC_DOMAIN
value: "muc.xmpp.jitsi.svc.k8aux.undercloud.cf"
- name: XMPP_INTERNAL_MUC_DOMAIN
value: "internal-muc.xmpp.jitsi.svc.k8aux.undercloud.cf"
#- RESET_APPLICATION_SSH_KEY=false # set to true to regenerate and import SSH keys
#- SSH_KEY_TYPE=rsa # SSH key type 'dsa', 'rsa', or 'ecdsa' for generated keys
#- SSH_KEY_LENGTH=2048 # SSH key length for generated keys 2048 => 'rsa','dsa'; 521 => 'ecdsa'
#- name: PUID
# value: "1000"
#- name: PGID
# value: "1000"
#lifecycle:
# postStart:
# exec:
# command: ["/bin/sh", "-c", "cp -rf /opt/bastillion/jetty/bastillion/WEB-INF/classe/BastillionConfig.properties.tmp /opt/bastillion/jetty/bastillion/WEB-INF/classe/BastillionConfig.properties"]
volumeMounts:
- mountPath: /config
name: data
subPath: config
volumes:
- name: data
persistentVolumeClaim:
claimName: jvb-data
readOnly: false
---
# ClusterIP service for the Jitsi videobridge (JVB): media on 10000/udp,
# REST/health endpoint on 8080/tcp.
apiVersion: v1
kind: Service
metadata:
  name: jvb
  namespace: jitsi
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: PreferDualStack
  #ipFamilyPolicy: SingleStack
  ports:
    - name: jvb1
      # JVB media (RTP) uses UDP; without an explicit protocol the Service
      # port defaults to TCP and media traffic would not be forwarded.
      # Matches JVB_PORT=10000 in the jvb Deployment.
      protocol: UDP
      port: 10000
      targetPort: 10000
    - name: jvb2
      protocol: TCP
      port: 8080
      targetPort: 8080
  selector:
    app: jvb
  #sessionAffinity: None
  type: ClusterIP
---

6
jitsi/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
# Namespace holding all Jitsi Meet components (web, xmpp, jicofo, jvb, ...).
apiVersion: v1
kind: Namespace
metadata:
  name: jitsi
  labels:
    # Opt-in label for Prometheus namespace discovery (same label is used on
    # the kubevirt namespace in this repo).
    prometheus: prometheus

12
jitsi/secrets.yaml Normal file
View File

@@ -0,0 +1,12 @@
# XMPP component passwords for the Jitsi stack; consumed via secretKeyRef
# (JICOFO_AUTH_PASSWORD, JVB_AUTH_PASSWORD, ...) by the jitsi Deployments.
# NOTE(review): static base64 credentials committed to the repo — anyone with
# read access can decode them; consider sealed-secrets or an external store.
apiVersion: v1
kind: Secret
metadata:
  name: jitsi
  namespace: jitsi
type: Opaque
data:
  JICOFO_AUTH_PASSWORD: a3Jhc3Nlc0pJQ09GT19BVVRIX1BBU1NXT1JE
  JVB_AUTH_PASSWORD: a3Jhc3Nlc0pWQl9BVVRIX1BBU1NXT1JE
  JIGASI_XMPP_PASSWORD: a3Jhc3Nlc0pJR0FTSV9YTVBQX1BBU1NXT1JE
  JIBRI_RECORDER_PASSWORD: a3Jhc3Nlc0pJQlJJX1JFQ09SREVSX1BBU1NXT1JE
  JIBRI_XMPP_PASSWORD: a3Jhc3Nlc0pJQlJJX1hNUFBfUEFTU1dPUkQ=

10
kubevirt/README.md Normal file
View File

@@ -0,0 +1,10 @@
# Kubevirt
## virtual Machines in Kubernetes
KubeVirt technology addresses the needs of development teams that have adopted or want to adopt Kubernetes but possess existing Virtual Machine-based workloads that cannot be easily containerized. More specifically, the technology provides a unified development platform where developers can build, modify, and deploy applications residing in both Application Containers as well as Virtual Machines in a common, shared environment.
Benefits are broad and significant. Teams with a reliance on existing virtual machine-based workloads are empowered to rapidly containerize applications. With virtualized workloads placed directly in development workflows, teams can decompose them over time while still leveraging remaining virtualized components as is comfortably desired.
NOT WORKING!
there is no working arm64 version

17
kubevirt/filesystem.yaml Normal file
View File

@@ -0,0 +1,17 @@
#apiVersion: ceph.rook.io/v1
#kind: CephFilesystem
#metadata:
# name: kubevirt
# namespace: rook-ceph
#spec:
# metadataPool:
# replicated:
# size: 3
# dataPools:
# - name: replicated
# replicated:
# size: 3
# preserveFilesystemOnDelete: false
# metadataServer:
# activeCount: 1
# activeStandby: true

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,111 @@
# Very high scheduling priority for core kubevirt components so they preempt
# ordinary workloads instead of being evicted; not the cluster default.
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: kubevirt-cluster-critical
value: 1000000000
globalDefault: false
description: "This priority class should be used for core kubevirt components only."
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
kubevirt.io: virt-operator
name: virt-operator
namespace: kubevirt
spec:
replicas: 2
selector:
matchLabels:
kubevirt.io: virt-operator
strategy:
type: RollingUpdate
template:
metadata:
labels:
kubevirt.io: virt-operator
name: virt-operator
prometheus.kubevirt.io: "true"
name: virt-operator
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: kubevirt.io
operator: In
values:
- virt-operator
topologyKey: kubernetes.io/hostname
weight: 1
containers:
- args:
- --port
- "8443"
- -v
- "2"
command:
- virt-operator
env:
- name: VIRT_OPERATOR_IMAGE
value: quay.io/kubevirt/virt-operator:v0.60.0-alpha.0
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.annotations['olm.targetNamespaces']
- name: KUBEVIRT_VERSION
value: v0.60.0-alpha.0
image: quay.io/kubevirt/virt-operator:v0.60.0-alpha.0
imagePullPolicy: IfNotPresent
name: virt-operator
ports:
- containerPort: 8443
name: metrics
protocol: TCP
- containerPort: 8444
name: webhooks
protocol: TCP
readinessProbe:
httpGet:
path: /metrics
port: 8443
scheme: HTTPS
initialDelaySeconds: 5
timeoutSeconds: 10
resources:
requests:
cpu: 10m
memory: 450Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /etc/virt-operator/certificates
name: kubevirt-operator-certs
readOnly: true
- mountPath: /profile-data
name: profile-data
nodeSelector:
kubernetes.io/os: linux
priorityClassName: kubevirt-cluster-critical
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubevirt-operator
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- name: kubevirt-operator-certs
secret:
optional: true
secretName: kubevirt-operator-certs
- emptyDir: {}
name: profile-data

14
kubevirt/kubevirt.yaml Normal file
View File

@@ -0,0 +1,14 @@
---
# KubeVirt custom resource: instructs virt-operator to roll out the KubeVirt
# components with default settings (no feature gates, no customization).
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
  name: kubevirt
  namespace: kubevirt
spec:
  certificateRotateStrategy: {}
  configuration:
    developerConfiguration:
      # No optional feature gates enabled.
      featureGates: []
  customizeComponents: {}
  imagePullPolicy: IfNotPresent
  workloadUpdateStrategy: {}

8
kubevirt/namespace.yaml Normal file
View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kubevirt
  labels:
    # Opt-in for Prometheus namespace discovery (see other namespaces here).
    prometheus: prometheus
    kubevirt.io: ""
    # virt-handler/launcher pods need privileged access to the host.
    pod-security.kubernetes.io/enforce: "privileged"

BIN
logging/.DS_Store vendored Normal file

Binary file not shown.

28
logging/README.md Normal file
View File

@@ -0,0 +1,28 @@
# logging
## beats, elasticsearch, kibana
jeez...
beats collects logs and sends them to logstash
(fluentd or fluentbit would be an alternative)
logstash is the pipeline that receives, filters, and forwards the data
elasticsearch is the datastore and search engine that indexes the data
kibana is the web interface for elasticsearch
there are multiple ways to deploy all of that
the most Kubernetes-native way is ECK (Elastic Cloud on Kubernetes)
ECK is an operator plus CRDs (like rook)
it includes crds for beats, elasticsearch and kibana
BUT NOT LOGSTASH!
logstash might not be needed if filebeat (part of beats) ships directly to elasticsearch — to be verified
improvements:
get working!
metrics
liveness probes
resource limits

329
logging/beats.yaml Normal file
View File

@@ -0,0 +1,329 @@
---
# Metricbeat DaemonSet (ECK Beat CRD): collects host- and kubernetes-level
# metrics on every node and ships them to the "cluster" Elasticsearch.
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: metricbeat
  namespace: logging
spec:
  type: metricbeat
  version: 8.5.0
  # ECK resolves these refs and injects connection details and credentials.
  elasticsearchRef:
    name: cluster
  kibanaRef:
    name: kibana
  config:
    metricbeat:
      autodiscover:
        providers:
          # Hints-based autodiscover: pods annotated with co.elastic.metrics/*
          # get per-pod metric configs generated automatically.
          - hints:
              default_config: {}
              enabled: "true"
            node: ${NODE_NAME}
            type: kubernetes
      modules:
        - module: system
          period: 10s
          metricsets:
            - cpu
            - load
            - memory
            - network
            - process
            - process_summary
          process:
            include_top_n:
              by_cpu: 5
              by_memory: 5
          processes:
            - .*
        - module: system
          period: 1m
          metricsets:
            - filesystem
            - fsstat
          processors:
            # Drop pseudo-filesystems to keep the index small.
            - drop_event:
                when:
                  regexp:
                    system:
                      filesystem:
                        mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib)($|/)
        - module: kubernetes
          period: 10s
          node: ${NODE_NAME}
          hosts:
            # Scrape the local kubelet's metrics endpoint.
            - https://${NODE_NAME}:10250
          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
          ssl:
            # Kubelet serving certs are typically self-signed per node.
            verification_mode: none
          metricsets:
            - node
            - system
            - pod
            - container
            - volume
    processors:
      - add_cloud_metadata: {}
      - add_host_metadata: {}
  daemonSet:
    podTemplate:
      spec:
        serviceAccountName: metricbeat
        automountServiceAccountToken: true # some older Beat versions are depending on this settings presence in k8s context
        containers:
          - args:
              - -e
              - -c
              - /etc/beat.yml
              - -system.hostfs=/hostfs
            name: metricbeat
            volumeMounts:
              - mountPath: /hostfs/sys/fs/cgroup
                name: cgroup
              # NOTE(review): mounts the Docker socket; on containerd/CRI-O
              # nodes this hostPath is empty — confirm the container runtime.
              - mountPath: /var/run/docker.sock
                name: dockersock
              - mountPath: /hostfs/proc
                name: proc
            env:
              - name: NODE_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: spec.nodeName
        dnsPolicy: ClusterFirstWithHostNet
        hostNetwork: true # Allows to provide richer host metadata
        securityContext:
          # Root is required to read host /proc and cgroup data.
          runAsUser: 0
        terminationGracePeriodSeconds: 30
        volumes:
          - hostPath:
              path: /sys/fs/cgroup
            name: cgroup
          - hostPath:
              path: /var/run/docker.sock
            name: dockersock
          - hostPath:
              path: /proc
            name: proc
---
# permissions needed for metricbeat
# source: https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-kubernetes.html
# Read-only access to workload objects plus kubelet stats and /metrics,
# used by the kubernetes module and hints-based autodiscover above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metricbeat
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
      - namespaces
      - events
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - replicasets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apps
    resources:
      - statefulsets
      - deployments
      - replicasets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      # kubelet summary API (cpu/memory/fs stats per node).
      - nodes/stats
    verbs:
      - get
  - nonResourceURLs:
      - /metrics
    verbs:
      - get
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metricbeat
  # Must live in the same namespace as the metricbeat Beat resource: a pod's
  # serviceAccountName is resolved in the pod's own namespace, so "default"
  # here would leave the DaemonSet in "logging" without its ServiceAccount.
  namespace: logging
---
# Binds the metricbeat ServiceAccount to the read-only ClusterRole above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metricbeat
subjects:
  - kind: ServiceAccount
    name: metricbeat
    # Must reference the namespace the ServiceAccount actually lives in
    # (the metricbeat Beat runs in "logging", not "default").
    namespace: logging
roleRef:
  kind: ClusterRole
  name: metricbeat
  apiGroup: rbac.authorization.k8s.io
---
---
# Filebeat DaemonSet (ECK Beat CRD): tails container logs on every node via
# hints-based autodiscover and ships them to the ECK-managed Elasticsearch.
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: filebeat
  # Keep all logging components in one namespace (metricbeat and journalbeat
  # already declare it explicitly).
  namespace: logging
spec:
  type: filebeat
  version: 8.5.0
  elasticsearchRef:
    # The Elasticsearch resource in this repo is named "cluster" (see
    # elasticsearch.yaml); "elasticsearch" does not exist, which left this
    # Beat unable to associate with any cluster.
    name: cluster
  kibanaRef:
    name: kibana
  config:
    filebeat:
      autodiscover:
        providers:
          - type: kubernetes
            node: ${NODE_NAME}
            hints:
              enabled: true
              default_config:
                type: container
                paths:
                  - /var/log/containers/*${data.kubernetes.container.id}.log
    processors:
      - add_cloud_metadata: {}
      - add_host_metadata: {}
  daemonSet:
    podTemplate:
      spec:
        serviceAccountName: filebeat
        automountServiceAccountToken: true
        terminationGracePeriodSeconds: 30
        dnsPolicy: ClusterFirstWithHostNet
        hostNetwork: true # Allows to provide richer host metadata
        containers:
          - name: filebeat
            securityContext:
              # Root is required to read pod log files on the host.
              runAsUser: 0
            # If using Red Hat OpenShift uncomment this:
            #privileged: true
            volumeMounts:
              - name: varlogcontainers
                mountPath: /var/log/containers
              - name: varlogpods
                mountPath: /var/log/pods
              - name: varlibdockercontainers
                mountPath: /var/lib/docker/containers
            env:
              - name: NODE_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: spec.nodeName
        volumes:
          - name: varlogcontainers
            hostPath:
              path: /var/log/containers
          - name: varlogpods
            hostPath:
              path: /var/log/pods
          - name: varlibdockercontainers
            hostPath:
              path: /var/lib/docker/containers
---
# Read-only access needed by filebeat's kubernetes autodiscover provider to
# enrich log events with pod/namespace/node metadata.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources:
      - namespaces
      - pods
      - nodes
    verbs:
      - get
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  # Must match the namespace the filebeat Beat's DaemonSet pods run in;
  # serviceAccountName is resolved in the pod's own namespace, so "default"
  # here would break the DaemonSet.
  namespace: logging
---
# Binds the filebeat ServiceAccount to the read-only ClusterRole above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
  - kind: ServiceAccount
    name: filebeat
    # Must reference the namespace the ServiceAccount actually lives in
    # (the logging stack runs in "logging", not "default").
    namespace: logging
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
---
# Journalbeat DaemonSet: ships systemd journal entries from every node.
# NOTE(review): pinned to 7.15.2 (the final Journalbeat release) while the
# target Elasticsearch is 8.6.2 — confirm cross-major compatibility; the
# journald input lives in Filebeat in the 8.x series.
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: journalbeat
  namespace: logging
spec:
  type: journalbeat
  version: 7.15.2 # last release of Journalbeat
  elasticsearchRef:
    name: cluster
  config:
    journalbeat.inputs:
      # Empty paths = the default local journal; resume from the stored
      # cursor, falling back to the journal tail on first run.
      - paths: []
        seek: cursor
        cursor_seek_fallback: tail
    processors:
      - add_cloud_metadata: {}
      - add_host_metadata: {}
  daemonSet:
    podTemplate:
      spec:
        automountServiceAccountToken: true # some older Beat versions are depending on this settings presence in k8s context
        dnsPolicy: ClusterFirstWithHostNet
        containers:
          - name: journalbeat
            volumeMounts:
              - mountPath: /var/log/journal
                name: var-journal
              - mountPath: /run/log/journal
                name: run-journal
              - mountPath: /etc/machine-id
                name: machine-id
        hostNetwork: true # Allows to provide richer host metadata
        securityContext:
          # Root is required to read the host journal files.
          runAsUser: 0
        terminationGracePeriodSeconds: 30
        volumes:
          - hostPath:
              path: /var/log/journal
            name: var-journal
          - hostPath:
              path: /run/log/journal
            name: run-journal
          - hostPath:
              path: /etc/machine-id
            name: machine-id

27
logging/configmaps.yaml Normal file
View File

@@ -0,0 +1,27 @@
# Logstash settings + pipeline. Mounted twice by the logstash Deployment:
# logstash.yml into /usr/share/logstash/config and logstash.conf into
# /usr/share/logstash/pipeline. The ${...} placeholders in the pipeline are
# substituted from the container env (ES_HOSTS, ES_USER, ES_PASSWORD).
# NOTE(review): the 'cacert' output option is deprecated in Logstash 8 in
# favour of 'ssl_certificate_authorities' — confirm before upgrading.
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
  namespace: logging
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    # all input will come from filebeat, no local logs
    input {
      beats {
        port => 5044
      }
    }
    filter {
    }
    output {
      elasticsearch {
        index => "logstash-%{[@metadata][beat]}"
        hosts => [ "${ES_HOSTS}" ]
        user => "${ES_USER}"
        password => "${ES_PASSWORD}"
        cacert => '/etc/logstash/certificates/ca.crt'
      }
    }

5185
logging/crds.yaml Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,22 @@
# Single-node ECK-managed Elasticsearch backing the logging stack.
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: cluster
  # Everything else in the stack expects this cluster in "logging": the
  # pre-created cluster-es-elastic-user secret declares owner-namespace
  # "logging" and logstash resolves cluster-es-http there.
  namespace: logging
spec:
  version: 8.6.2
  nodeSets:
    - name: default
      count: 1
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data # Do not change this name unless you set up a volume mount for the data path.
          spec:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 5Gi
            # Backed by the CephFS "logging" StorageClass in this repo.
            storageClassName: logging
      config:
        # Required on nodes where vm.max_map_count is not raised.
        node.store.allow_mmap: false

42
logging/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
# CephFS filesystem backing the "logging" StorageClass (storageclass.yaml
# references fsName: logging and pool: logging-replicated).
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: logging
  namespace: rook-ceph
spec:
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  dataPools:
    - name: replicated
      failureDomain: host
      replicated:
        size: 3
  # Filesystem (and its data) is removed when this CR is deleted.
  preserveFilesystemOnDelete: false
  metadataServer:
    activeCount: 1
    activeStandby: true
    placement:
      # nodeAffinity:
      #  requiredDuringSchedulingIgnoredDuringExecution:
      #    nodeSelectorTerms:
      #    - matchExpressions:
      #      - key: role
      #        operator: In
      #        values:
      #        - mds-node
      # Allow MDS pods onto dedicated (tainted) storage nodes.
      tolerations:
        - key: node-role.kubernetes.io/storage-node
          operator: Exists
          effect: NoSchedule
      # podAffinity:
      # podAntiAffinity:
      # topologySpreadConstraints:
    #resources:
    # limits:
    #   cpu: "80m"
    #   memory: "1024Mi"
    # requests:
    #   cpu: "500m"
    #   memory: "1024Mi"

26
logging/ingress.yaml Normal file
View File

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: logging
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    # The ECK-managed kibana-kb-http service terminates TLS itself, so
    # nginx must speak HTTPS to the backend.
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  # Replaces the deprecated "kubernetes.io/ingress.class" annotation;
  # networking.k8s.io/v1 selects the controller via spec.ingressClassName.
  ingressClassName: nginx
  tls:
    - hosts:
        - kibana.undercloud.cf
      secretName: kibana-tls
  rules:
    - host: kibana.undercloud.cf
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kibana-kb-http
                port:
                  number: 5601

9
logging/kibana.yaml Normal file
View File

@@ -0,0 +1,9 @@
# Kibana UI for the logging cluster; exposed via the kibana Ingress.
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana
  # Must share a namespace with the Elasticsearch "cluster" resource so the
  # namespace-local elasticsearchRef resolves, and with the kibana Ingress
  # which targets kibana-kb-http in "logging".
  namespace: logging
spec:
  version: 8.6.2
  count: 1
  elasticsearchRef:
    name: cluster

85
logging/logstash.yaml Normal file
View File

@@ -0,0 +1,85 @@
# Logstash pipeline: receives events from Beats on 5044 and forwards them to
# the ECK-managed Elasticsearch over TLS as the "elastic" user.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: logging
  labels:
    app: logstash
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      labels:
        app: logstash
    spec:
      dnsConfig:
        options:
          # ndots=1 avoids search-domain lookups for the hostnames below.
          - name: ndots
            value: "1"
      containers:
        - image: logstash:8.6.2
          name: logstash
          ports:
            - containerPort: 25826
            - containerPort: 5044
          env:
            # Consumed by ${...} substitution in logstash.conf (see the
            # logstash-configmap pipeline output section).
            - name: ES_HOSTS
              value: "https://cluster-es-http:9200"
            - name: ES_USER
              value: "elastic"
            - name: ES_PASSWORD
              valueFrom:
                secretKeyRef:
                  # Password secret for the elastic superuser (see
                  # secrets.yaml; labelled for ECK ownership).
                  name: cluster-es-elastic-user
                  key: elastic
          resources: {}
          volumeMounts:
            - name: config-volume
              mountPath: /usr/share/logstash/config
            - name: logstash-pipeline-volume
              mountPath: /usr/share/logstash/pipeline
            - name: cert-ca
              mountPath: "/etc/logstash/certificates"
              readOnly: true
      restartPolicy: Always
      volumes:
        - name: config-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.yml
                path: logstash.yml
        - name: logstash-pipeline-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.conf
                path: logstash.conf
        - name: cert-ca
          secret:
            # ECK-generated secret containing the HTTP-layer CA (ca.crt).
            secretName: cluster-es-http-certs-public
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: logstash
  name: logstash
  # The logstash Deployment is in "logging"; make the Service explicit so it
  # cannot land in the kubectl default namespace.
  namespace: logging
spec:
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: PreferDualStack
  type: ClusterIP
  ports:
    # Port names must be valid IANA service names (lowercase alphanumerics
    # and '-', containing at least one letter); the previous all-numeric
    # names ("25826", "5044") are rejected by the API server.
    - name: tcp-25826
      port: 25826
      targetPort: 25826
    - name: tcp-5044
      port: 5044
      targetPort: 5044
  selector:
    app: logstash

6
logging/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
# Namespace for the logging stack (elasticsearch, kibana, beats, logstash).
apiVersion: v1
kind: Namespace
metadata:
  name: logging
  #labels:
  # undercloud.cf/cert: "ca"

724
logging/operator.yaml Normal file
View File

@@ -0,0 +1,724 @@
# Source: eck-operator/templates/operator-namespace.yaml
# Namespace for the ECK operator itself (managed workloads live in "logging").
apiVersion: v1
kind: Namespace
metadata:
  name: elastic-system
  labels:
    name: elastic-system
---
# Source: eck-operator/templates/service-account.yaml
# Identity of the elastic-operator StatefulSet; bound cluster-wide by the
# elastic-operator ClusterRoleBinding below.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elastic-operator
  namespace: elastic-system
  labels:
    control-plane: elastic-operator
    app.kubernetes.io/version: "2.6.1"
---
# Source: eck-operator/templates/webhook.yaml
# Created empty; mounted by the operator StatefulSet and referenced via its
# WEBHOOK_SECRET env var. NOTE(review): presumably populated with the webhook
# serving certificate by the operator at runtime — confirm.
apiVersion: v1
kind: Secret
metadata:
  name: elastic-webhook-server-cert
  namespace: elastic-system
  labels:
    control-plane: elastic-operator
    app.kubernetes.io/version: "2.6.1"
---
# Source: eck-operator/templates/configmap.yaml
# Operator configuration flags; mounted at /conf and consumed via the
# StatefulSet's "--config=/conf/eck.yaml" argument.
apiVersion: v1
kind: ConfigMap
metadata:
  name: elastic-operator
  namespace: elastic-system
  labels:
    control-plane: elastic-operator
    app.kubernetes.io/version: "2.6.1"
data:
  eck.yaml: "log-verbosity: 0\nmetrics-port: 0\ncontainer-registry: docker.elastic.co\ncontainer-suffix: \nmax-concurrent-reconciles: 3\nca-cert-validity: 8760h\nca-cert-rotate-before: 24h\ncert-validity: 8760h\ncert-rotate-before: 24h\nexposed-node-labels: [topology.kubernetes.io/.*,failure-domain.beta.kubernetes.io/.*]\nset-default-security-context: auto-detect\nkube-client-timeout: 60s\nelasticsearch-client-timeout: 180s\ndisable-telemetry: false\ndistribution-channel: all-in-one\nvalidate-storage-class: true\nenable-webhook: true\nwebhook-name: elastic-webhook.k8s.elastic.co\nenable-leader-election: true\nelasticsearch-observation-interval: 10s"
---
# Source: eck-operator/templates/cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elastic-operator
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.6.1"
rules:
- apiGroups:
- "authorization.k8s.io"
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- elastic-operator-leader
verbs:
- get
- watch
- update
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- events
- persistentvolumeclaims
- secrets
- services
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- apps
resources:
- deployments
- statefulsets
- daemonsets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- elasticsearch.k8s.elastic.co
resources:
- elasticsearches
- elasticsearches/status
- elasticsearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- autoscaling.k8s.elastic.co
resources:
- elasticsearchautoscalers
- elasticsearchautoscalers/status
- elasticsearchautoscalers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- kibana.k8s.elastic.co
resources:
- kibanas
- kibanas/status
- kibanas/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- apm.k8s.elastic.co
resources:
- apmservers
- apmservers/status
- apmservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- enterprisesearch.k8s.elastic.co
resources:
- enterprisesearches
- enterprisesearches/status
- enterprisesearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- beat.k8s.elastic.co
resources:
- beats
- beats/status
- beats/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- agent.k8s.elastic.co
resources:
- agents
- agents/status
- agents/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- maps.k8s.elastic.co
resources:
- elasticmapsservers
- elasticmapsservers/status
- elasticmapsservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- stackconfigpolicy.k8s.elastic.co
resources:
- stackconfigpolicies
- stackconfigpolicies/status
- stackconfigpolicies/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
---
# Source: eck-operator/templates/cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "elastic-operator-view"
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
control-plane: elastic-operator
app.kubernetes.io/version: "2.6.1"
rules:
- apiGroups: ["elasticsearch.k8s.elastic.co"]
resources: ["elasticsearches"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling.k8s.elastic.co"]
resources: ["elasticsearchautoscalers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apm.k8s.elastic.co"]
resources: ["apmservers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["kibana.k8s.elastic.co"]
resources: ["kibanas"]
verbs: ["get", "list", "watch"]
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
resources: ["enterprisesearches"]
verbs: ["get", "list", "watch"]
- apiGroups: ["beat.k8s.elastic.co"]
resources: ["beats"]
verbs: ["get", "list", "watch"]
- apiGroups: ["agent.k8s.elastic.co"]
resources: ["agents"]
verbs: ["get", "list", "watch"]
- apiGroups: ["maps.k8s.elastic.co"]
resources: ["elasticmapsservers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["stackconfigpolicy.k8s.elastic.co"]
resources: ["stackconfigpolicies"]
verbs: ["get", "list", "watch"]
---
# Source: eck-operator/templates/cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "elastic-operator-edit"
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
control-plane: elastic-operator
app.kubernetes.io/version: "2.6.1"
rules:
- apiGroups: ["elasticsearch.k8s.elastic.co"]
resources: ["elasticsearches"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["autoscaling.k8s.elastic.co"]
resources: ["elasticsearchautoscalers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["apm.k8s.elastic.co"]
resources: ["apmservers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["kibana.k8s.elastic.co"]
resources: ["kibanas"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
resources: ["enterprisesearches"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["beat.k8s.elastic.co"]
resources: ["beats"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["agent.k8s.elastic.co"]
resources: ["agents"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["maps.k8s.elastic.co"]
resources: ["elasticmapsservers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["stackconfigpolicy.k8s.elastic.co"]
resources: ["stackconfigpolicies"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
---
# Source: eck-operator/templates/role-bindings.yaml
# Grants the operator's ServiceAccount the cluster-wide permissions defined
# in the elastic-operator ClusterRole.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: elastic-operator
  labels:
    control-plane: elastic-operator
    app.kubernetes.io/version: "2.6.1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: elastic-operator
subjects:
  - kind: ServiceAccount
    name: elastic-operator
    namespace: elastic-system
---
# Source: eck-operator/templates/webhook.yaml
# Routes admission-webhook calls (443) to the operator pod's webhook port
# (9443), as referenced by the ValidatingWebhookConfiguration clientConfig.
apiVersion: v1
kind: Service
metadata:
  name: elastic-webhook-server
  namespace: elastic-system
  labels:
    control-plane: elastic-operator
    app.kubernetes.io/version: "2.6.1"
spec:
  ports:
    - name: https
      port: 443
      targetPort: 9443
  selector:
    control-plane: elastic-operator
---
# Source: eck-operator/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elastic-operator
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.6.1"
spec:
selector:
matchLabels:
control-plane: elastic-operator
serviceName: elastic-operator
replicas: 1
template:
metadata:
annotations:
# Rename the fields "error" to "error.message" and "source" to "event.source"
# This is to avoid a conflict with the ECS "error" and "source" documents.
"co.elastic.logs/raw": "[{\"type\":\"container\",\"json.keys_under_root\":true,\"paths\":[\"/var/log/containers/*${data.kubernetes.container.id}.log\"],\"processors\":[{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"error\",\"to\":\"_error\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_error\",\"to\":\"error.message\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"source\",\"to\":\"_source\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_source\",\"to\":\"event.source\"}]}}]}]"
"checksum/config": 0167077654d0c8023b9201c09b02b9213c73d47b50aab990b1e2e8cd41653ca7
labels:
control-plane: elastic-operator
spec:
terminationGracePeriodSeconds: 10
serviceAccountName: elastic-operator
securityContext:
runAsNonRoot: true
containers:
- image: "docker.elastic.co/eck/eck-operator:2.6.1"
imagePullPolicy: IfNotPresent
name: manager
args:
- "manager"
- "--config=/conf/eck.yaml"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
env:
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: WEBHOOK_SECRET
value: elastic-webhook-server-cert
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 150Mi
ports:
- containerPort: 9443
name: https-webhook
protocol: TCP
volumeMounts:
- mountPath: "/conf"
name: conf
readOnly: true
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
volumes:
- name: conf
configMap:
name: elastic-operator
- name: cert
secret:
defaultMode: 420
secretName: elastic-webhook-server-cert
---
# Source: eck-operator/templates/webhook.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: elastic-webhook.k8s.elastic.co
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.6.1"
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-agent-k8s-elastic-co-v1alpha1-agent
failurePolicy: Ignore
name: elastic-agent-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- agent.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- agents
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-apm-k8s-elastic-co-v1-apmserver
failurePolicy: Ignore
name: elastic-apm-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- apm.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- apmservers
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-apm-k8s-elastic-co-v1beta1-apmserver
failurePolicy: Ignore
name: elastic-apm-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- apm.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- apmservers
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-beat-k8s-elastic-co-v1beta1-beat
failurePolicy: Ignore
name: elastic-beat-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- beat.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- beats
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-enterprisesearch-k8s-elastic-co-v1-enterprisesearch
failurePolicy: Ignore
name: elastic-ent-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- enterprisesearch.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- enterprisesearches
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-enterprisesearch-k8s-elastic-co-v1beta1-enterprisesearch
failurePolicy: Ignore
name: elastic-ent-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- enterprisesearch.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- enterprisesearches
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-elasticsearch-k8s-elastic-co-v1-elasticsearch
failurePolicy: Ignore
name: elastic-es-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- elasticsearch.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- elasticsearches
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-elasticsearch-k8s-elastic-co-v1beta1-elasticsearch
failurePolicy: Ignore
name: elastic-es-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- elasticsearch.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- elasticsearches
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-kibana-k8s-elastic-co-v1-kibana
failurePolicy: Ignore
name: elastic-kb-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- kibana.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- kibanas
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-kibana-k8s-elastic-co-v1beta1-kibana
failurePolicy: Ignore
name: elastic-kb-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- kibana.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- kibanas
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-autoscaling-k8s-elastic-co-v1alpha1-elasticsearchautoscaler
failurePolicy: Ignore
name: elastic-esa-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1beta1]
sideEffects: None
rules:
- apiGroups:
- autoscaling.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- elasticsearchautoscalers
- clientConfig:
caBundle: Cg==
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-scp-k8s-elastic-co-v1alpha1-stackconfigpolicies
failurePolicy: Ignore
name: elastic-scp-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- stackconfigpolicy.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- stackconfigpolicies

24
logging/secrets.yaml Normal file
View File

@@ -0,0 +1,24 @@
# Static password secret for the logging stack.
# NOTE(review): base64-encoded credential committed to the repo — anyone with
# read access can decode it; consider sealed-secrets or an external store.
apiVersion: v1
kind: Secret
metadata:
  name: elasticsearch
  namespace: logging
type: Opaque
data:
  pw: ZWxhc3RpY3B3U2FmZQ==
---
# Pre-created password for the "elastic" superuser of the "cluster"
# Elasticsearch; the labels mimic an ECK-owned secret, presumably so the
# operator adopts this fixed password instead of generating a random one —
# NOTE(review): confirm ECK honours a pre-created user secret.
# NOTE(review): credential committed to the repo in base64; consider a
# secret store.
apiVersion: v1
data:
  elastic: NElzVGhlTWluZEtpbGxlcg==
kind: Secret
metadata:
  labels:
    common.k8s.elastic.co/type: elasticsearch
    eck.k8s.elastic.co/credentials: "true"
    eck.k8s.elastic.co/owner-kind: Elasticsearch
    eck.k8s.elastic.co/owner-name: cluster
    eck.k8s.elastic.co/owner-namespace: logging
    elasticsearch.k8s.elastic.co/cluster-name: cluster
  name: cluster-es-elastic-user
  namespace: logging
type: Opaque

28
logging/storageclass.yaml Normal file
View File

@@ -0,0 +1,28 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: logging
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: logging
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: logging-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete

54
mail/README.md Normal file
View File

@@ -0,0 +1,54 @@
# docker-mailserver
## mailserver
### A production-ready fullstack but simple containerized mail server (SMTP, IMAP, LDAP, Antispam, Antivirus, etc.). Only configuration files, no SQL database. Keep it simple and versioned. Easy to deploy and upgrade. Originally created by @tomav, this project is now maintained by volunteers since January 2021.
remember:
there are special settings for nginx-ingress:
apiVersion: v1
kind: ConfigMap
metadata:
name: tcp-services
namespace: ingress-nginx
data:
636: "openldap/ldap:636"
389: "openldap/ldap:389"
25: "docker-mailserver/docker-mailserver:25::PROXY"
465: "docker-mailserver/docker-mailserver:465::PROXY"
587: "docker-mailserver/docker-mailserver:587::PROXY"
993: "docker-mailserver/docker-mailserver:993::PROXY"
143: "docker-mailserver/docker-mailserver:143::PROXY"
and cert-manager:
hostAliases:
- ip: "2001:470:72f0:f:1::b492"
hostnames:
- "ldap.undercloud.cf"
- "mail.undercloud.cf"
- "smtp.undercloud.cf"
- "imap.undercloud.cf"
- ip: "10.0.91.211"
hostnames:
- "ldap.undercloud.cf"
- "mail.undercloud.cf"
- "smtp.undercloud.cf"
- "imap.undercloud.cf"
improvements:
get working!
metrics
liveness probes
resource limits
# roundcube
## web mail client
### Simple, modern, lightweight & fast web-based email client.
supposedly it's abandoned and SnappyMail is a modern fork
improvements:
get it working!
metrics
liveness probes
resource limits

98
mail/autodiscover.yaml Normal file
View File

@@ -0,0 +1,98 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: autodiscover.environment
namespace: mail
immutable: false
data:
COMPANY_NAME: 'Undercloud'
SUPPORT_URL: 'https://autodiscover.undercloud.cf'
DOMAIN: 'undercloud.cf'
# IMAP configuration (host mandatory to enable)
IMAP_HOST: 'mail.undercloud.cf'
IMAP_PORT: '993'
IMAP_SOCKET: 'SSL'
# POP configuration (host mandatory to enable)
POP_HOST: 'mail.undercloud.cf'
POP_PORT: '995'
POP_SOCKET: 'SSL'
# SMTP configuration (host mandatory to enable)
SMTP_HOST: 'mail.undercloud.cf'
SMTP_PORT: '587'
SMTP_SOCKET: 'STARTTLS'
# MobileSync/ActiveSync configuration (url mandatory to enable)
MOBILESYNC_URL: 'https://sync.undercloud.cf'
MOBILESYNC_NAME: 'sync.undercloud.cf'
# LDAP configuration (host mandatory to enable)
LDAP_HOST: 'ldap.undercloud.cf'
LDAP_PORT: '636'
LDAP_SOCKET: 'SSL'
LDAP_BASE: 'dc=undercloud,dc=cf'
LDAP_USER_FIELD: 'uid'
LDAP_USER_BASE: 'ou=users,dc=undercloud,dc=cf'
LDAP_SEARCH: '(|(objectClass=inetOrgPerson))'
# Apple mobile config identifiers (identifier mandatory to enable)
#PROFILE_IDENTIFIER: 'com.example.autodiscover'
#PROFILE_UUID: '92943D26-CAB3-4086-897D-DC6C0D8B1E86'
#MAIL_UUID: '7A981A9E-D5D0-4EF8-87FE-39FD6A506FAC'
#LDAP_UUID: '6ECB6BA9-2208-4ABF-9E60-4E9F4CD7309E'
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: autodiscover
namespace: mail
spec:
replicas: 1
selector:
matchLabels:
app: autodiscover
template:
metadata:
labels:
app: autodiscover
spec:
containers:
- name: autodiscover
image: monogramm/autodiscover-email-settings:a7aee0d
imagePullPolicy: IfNotPresent
ports:
- name: transfer
containerPort: 8000
protocol: TCP
envFrom:
- configMapRef:
name: autodiscover.environment
env:
- name: LDAP_BIND_PW
valueFrom:
secretKeyRef:
name: mailserver-ldap
key: pw
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: autodiscover
namespace: mail
spec:
clusterIP: '2001:470:72f0:f:1::51'
clusterIPs:
- '2001:470:72f0:f:1::51'
- 10.0.91.51
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
ports:
# Transfer
- name: transfer
port: 8000
targetPort: 8000
protocol: TCP
selector:
app: autodiscover
#sessionAffinity: None
type: ClusterIP

140
mail/backupSchedule.yaml Normal file
View File

@@ -0,0 +1,140 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: mail-backup-csi-hourly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron: hourly, on the hour, from 15:00 through 22:00.
  schedule: "0 15-22 * * *"
  # With OwnerReferences set, backups created by this Schedule are
  # deleted together with the Schedule itself.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - mail
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 8h0m0s # keep 8 hours
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: mail-backup-csi-daily
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron: daily at midnight (previous comment incorrectly said "every hour").
  schedule: "0 0 * * *"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - mail
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 168h0m0s # keep 7 days
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: mail-backup-csi-weekly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron: weekly, Monday at midnight.
  schedule: "0 0 * * 1"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - mail
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 730h0m0s # keep ~30 days
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: mail-backup-restic-daily
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron: daily at midnight.
  schedule: "0 0 * * *"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    # File-system (restic) backup instead of CSI snapshots for this location.
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - mail
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 168h0m0s # keep 7 days
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: mail-backup-restic-weekly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron: weekly, Monday at midnight.
  schedule: "0 0 * * 1"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - mail
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 730h0m0s # keep ~30 days
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: mail-backup-restic-monthly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron: monthly, on the 1st at midnight.
  schedule: "0 0 1 * *"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - mail
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 4380h0m0s # keep ~6 months

42
mail/certificates.yaml Normal file
View File

@@ -0,0 +1,42 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: mail
namespace: mail
spec:
# Secret names are always required.
secretName: docker-mailserver-tls
duration: 2160h0m0s # 90d
renewBefore: 360h0m0s # 15d
subject:
organizations:
- undercloud
# The use of the common name field has been deprecated since 2000 and is
# discouraged from being used.
commonName: mail.undercloud.cf
#isCA: false
privateKey:
algorithm: RSA
encoding: PKCS1
size: 2048
usages:
- server auth
- client auth
# At least one of a DNS Name, URI, or IP address is required.
dnsNames:
- mail.undercloud.cf
- imap.undercloud.cf
- smtp.undercloud.cf
#- ldap.openldap.svc.k8aux.undercloud.cf
#ipAddresses:
# - 192.168.0.5
# Issuer references are always required.
issuerRef:
name: letsencrypt
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: ClusterIssuer
# This is optional since cert-manager will default to this value however
# if you are using an external issuer, change this to that issuer group.
#group: cert-manager.io

367
mail/docker-mailserver.yaml Normal file
View File

@@ -0,0 +1,367 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: mailserver.environment
  namespace: mail
immutable: false
data:
  TLS_LEVEL: modern
  POSTSCREEN_ACTION: drop
  OVERRIDE_HOSTNAME: mail.undercloud.cf
  FAIL2BAN_BLOCKTYPE: drop
  POSTMASTER_ADDRESS: postmaster@undercloud.cf
  UPDATE_CHECK_INTERVAL: 10d
  POSTFIX_INET_PROTOCOLS: ipv6, ipv4
  ONE_DIR: '0'
  ENABLE_CLAMAV: '0'
  ENABLE_POSTGREY: '0'
  ENABLE_FAIL2BAN: '1'
  AMAVIS_LOGLEVEL: '-1'
  # SPOOF_PROTECTION appeared twice in this map (duplicate YAML keys are
  # invalid; most parsers silently keep the last occurrence). Both copies
  # were '1', so a single entry preserves behavior.
  SPOOF_PROTECTION: '1'
  MOVE_SPAM_TO_JUNK: '1'
  ENABLE_UPDATE_CHECK: '1'
  ENABLE_SPAMASSASSIN: '1'
  SUPERVISOR_LOGLEVEL: warn
  SPAMASSASSIN_SPAM_TO_INBOX: '1'
  DMS_DEBUG: '1'
  ENABLE_POP3: '1'
  # here, we provide an example for the SSL configuration
  SSL_TYPE: manual
  SSL_CERT_PATH: /secrets/ssl/rsa/tls.crt
  SSL_KEY_PATH: /secrets/ssl/rsa/tls.key
  # ldap
  ACCOUNT_PROVISIONER: LDAP
  LDAP_SERVER_HOST: ldap.undercloud.cf
  LDAP_SEARCH_BASE: dc=undercloud,dc=cf
  LDAP_BIND_DN: cn=mailserver,ou=serviceaccounts,ou=users,dc=undercloud,dc=cf
  # LDAP_BIND_PW is injected from the mailserver-ldap Secret by the Deployment.
  LDAP_QUERY_FILTER_DOMAIN: (|(mail=*@%s)(mailAlias=*@%s)(mailGroupMember=*@%s))
  LDAP_QUERY_FILTER_USER: (&(objectClass=inetOrgPerson)(mail=%s))
  LDAP_QUERY_FILTER_ALIAS: (&(objectClass=inetOrgPerson)(mailAlias=%s))
  LDAP_QUERY_FILTER_GROUP: (&(objectClass=groupOfUniqueNames)(mail=%s))
  LDAP_QUERY_FILTER_SENDERS: (&(objectClass=inetOrgPerson)(|(mail=%s)))
  DOVECOT_PASS_ATTRS: uid=user,userPassword=password
  #DOVECOT_USER_ATTRS: home=/var/mail/%{ldap:uid},=mail=maildir:~/Maildir,uidNumber=uid,gidNumber=gid
  DOVECOT_USER_ATTRS: =home=/var/mail/%{ldap:uid},=mail=maildir:~/Maildir,uidNumber=uid,gidNumber=5000
  DOVECOT_USER_FILTER: (&(objectClass=person)(|(mail=%u)(uid=%u)))
  SASLAUTHD_MECHANISMS: rimap
  SASLAUTHD_MECH_OPTIONS: '::1'
  #LDAP_START_TLS: 'yes'
  #DOVECOT_TLS: 'yes'
  #SASLAUTHD_LDAP_START_TLS: 'yes'
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mailserver.files
namespace: mail
data:
# postfix-accounts.cf: |
# sebastian@undercloud.cf|{SHA512-CRYPT}$6$ACOZB1B.2yHv8ePj$9vIW46wFqHfIMlP9.sDE1xtk1XN5OhS6etnvv5AxDPVPMFXXx55dVNwybLAaS/YEKahPg56vE9d6CIl7pYDw41
# glodas@undercloud.cf|{SHA512-CRYPT}$6$ACOZB1B.2yHv8ePj$9vIW46wFqHfIMlP9.sDE1xtk1XN5OhS6etnvv5AxDPVPMFXXx55dVNwybLAaS/YEKahPg56vE9d6CIl7pYDw41
---
kind: ConfigMap
apiVersion: v1
metadata:
name: mailserver.config
namespace: mail
labels:
app: docker-mailserver
#data:
# postfix-main.cf: |
# postscreen_upstream_proxy_protocol = haproxy
# postfix-master.cf: |
# smtp/inet/postscreen_upstream_proxy_protocol=haproxy
# submission/inet/smtpd_upstream_proxy_protocol=haproxy
# smtps/inet/smtpd_upstream_proxy_protocol=haproxy
# dovecot.cf: |
# # Assuming your ingress controller is bound to 10.0.0.0/8
# haproxy_trusted_networks = 10.0.0.0/8, 127.0.0.0/8, 2001:470:72f0::/48, fd00::/48
# service imap-login {
# inet_listener imap {
# haproxy = yes
# }
# inet_listener imaps {
# haproxy = yes
# }
# }
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: docker-mailserver
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: docker-mailserver
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: docker-mailserver-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
namespace: mail
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 25G
storageClassName: docker-mailserver
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: docker-mailserver
namespace: mail
annotations:
ignore-check.kube-linter.io/run-as-non-root: >-
'mailserver' needs to run as root
ignore-check.kube-linter.io/privileged-ports: >-
  'mailserver' needs privileged ports
ignore-check.kube-linter.io/no-read-only-root-fs: >-
There are too many files written to make The
root FS read-only
spec:
replicas: 1
selector:
matchLabels:
app: docker-mailserver
template:
metadata:
labels:
app: docker-mailserver
#annotations:
# container.apparmor.security.beta.kubernetes.io/docker-mailserver: runtime/default
spec:
securityContext:
runAsUser: 0
runAsGroup: 5000
fsGroup: 5000
hostname: mail
#initContainers:
#- name: changeowner
# image: busybox
# command: ["sh", "-c", "chmod +w /var/mail"]
# volumeMounts:
# - name: data
# mountPath: /var/mail
# subPath: data
# readOnly: false
containers:
- name: docker-mailserver
image: docker.io/mailserver/docker-mailserver:latest
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: true
readOnlyRootFilesystem: false
runAsUser: 0
runAsGroup: 5000
runAsNonRoot: false
privileged: true
capabilities:
add:
# file permission capabilities
- CHOWN
- FOWNER
- MKNOD
- SETGID
- SETUID
- DAC_OVERRIDE
# network capabilities
- NET_ADMIN # needed for F2B
- NET_RAW # needed for F2B
- NET_BIND_SERVICE
# miscellaneous capabilities
- SYS_CHROOT
- KILL
drop: [ALL]
seccompProfile:
type: RuntimeDefault
# You want to tune this to your needs. If you disable ClamAV,
# you can use less RAM and CPU. This becomes important in
# case you're low on resources and Kubernetes refuses to
# schedule new pods.
resources:
limits:
memory: 2Gi
cpu: 1500m
requests:
memory: 500Mi
cpu: 600m
volumeMounts:
- name: files
subPath: postfix-accounts.cf
mountPath: /tmp/docker-mailserver/postfix-accounts.cf
readOnly: true
# PVCs
- name: data
mountPath: /var/mail
subPath: data
readOnly: false
- name: data
mountPath: /var/mail-state
subPath: state
readOnly: false
- name: data
mountPath: /var/log/mail
subPath: log
readOnly: false
# certificates
- name: certificates-rsa
mountPath: /secrets/ssl/rsa/
readOnly: true
# other
- name: tmp-files
mountPath: /tmp
readOnly: false
- name: config
subPath: postfix-main.cf
mountPath: /tmp/docker-mailserver/postfix-main.cf
readOnly: true
- name: config
subPath: postfix-master.cf
mountPath: /tmp/docker-mailserver/postfix-master.cf
readOnly: true
- name: config
subPath: dovecot.cf
mountPath: /tmp/docker-mailserver/dovecot.cf
readOnly: true
ports:
- name: transfer
containerPort: 25
protocol: TCP
- name: esmtp-implicit
containerPort: 465
protocol: TCP
- name: esmtp-explicit
containerPort: 587
- name: imap-implicit
containerPort: 993
protocol: TCP
- name: imap
containerPort: 143
protocol: TCP
- name: pop3
containerPort: 110
protocol: TCP
- name: pop3s
containerPort: 995
protocol: TCP
envFrom:
- configMapRef:
name: mailserver.environment
env:
- name: LDAP_BIND_PW
valueFrom:
secretKeyRef:
name: mailserver-ldap
key: pw
restartPolicy: Always
volumes:
# configuration files
- name: files
configMap:
name: mailserver.files
- name: config
configMap:
name: mailserver.config
# PVCs
- name: data
persistentVolumeClaim:
claimName: data
# certificates
- name: certificates-rsa
secret:
secretName: docker-mailserver-tls
items:
- key: tls.key
path: tls.key
- key: tls.crt
path: tls.crt
# other
- name: tmp-files
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: docker-mailserver
namespace: mail
spec:
clusterIP: '2001:470:72f0:f:1::50'
clusterIPs:
- '2001:470:72f0:f:1::50'
- 10.0.91.50
ipFamilies:
- IPv6
- IPv4
#ipFamilyPolicy: SingleStack
ipFamilyPolicy: PreferDualStack
ports:
# Transfer
- name: transfer
port: 25
targetPort: transfer
protocol: TCP
# ESMTP with implicit TLS
- name: esmtp-implicit
port: 465
targetPort: esmtp-implicit
protocol: TCP
# ESMTP with explicit TLS (STARTTLS)
- name: esmtp-explicit
port: 587
targetPort: esmtp-explicit
protocol: TCP
# IMAPS with implicit TLS
- name: imap-implicit
port: 993
targetPort: imap-implicit
protocol: TCP
- name: imap
port: 143
targetPort: imap
protocol: TCP
- name: pop3
port: 110
targetPort: pop3
protocol: TCP
- name: pop3s
port: 995
targetPort: pop3s
protocol: TCP
selector:
app: docker-mailserver
#sessionAffinity: None
type: ClusterIP

42
mail/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: docker-mailserver
namespace: rook-ceph
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPools:
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: false
metadataServer:
activeCount: 1
activeStandby: true
placement:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - mds-node
tolerations:
- key: node-role.kubernetes.io/storage-node
operator: Exists
effect: NoSchedule
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
#resources:
# limits:
# cpu: "80m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"

97
mail/ingress.yaml Normal file
View File

@@ -0,0 +1,97 @@
---
#apiVersion: networking.k8s.io/v1
#kind: Ingress
#metadata:
# labels:
# app: docker-mailserver
# name: mail
# namespace: mail
# annotations:
# nginx.ingress.kubernetes.io/rewrite-target: /
# cert-manager.io/cluster-issuer: "letsencrypt-staging"
# #acme.cert-manager.io/http01-edit-in-place: "true"
#spec:
# tls:
# - hosts:
# - mail.undercloud.cf
# secretName: docker-mailserver-tls
# rules:
# - host: mail.undercloud.cf
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: autoconfig
namespace: mail
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts:
- autoconfig.undercloud.cf
secretName: autoconfig-tls
rules:
- host: autoconfig.undercloud.cf
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: autodiscover
port:
number: 8000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: autodiscover
namespace: mail
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts:
- autodiscover.undercloud.cf
secretName: autodiscover-tls
rules:
- host: autodiscover.undercloud.cf
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: autodiscover
port:
number: 8000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: roundcube
namespace: mail
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts:
- roundcube.undercloud.cf
secretName: roundcube-tls
rules:
- host: roundcube.undercloud.cf
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: roundcubenginx
port:
number: 80

6
mail/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: mail
labels:
undercloud.cf/cert: "ca"

291
mail/roundcube.yaml Normal file
View File

@@ -0,0 +1,291 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: roundcubemail-www
namespace: mail
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 200Mi
storageClassName: docker-mailserver
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: roundcubemail-temp
namespace: mail
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
storageClassName: docker-mailserver
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: roundcubemail-db
namespace: mail
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi
storageClassName: docker-mailserver
---
apiVersion: v1
kind: ConfigMap
metadata:
name: roundcubenginx-config
namespace: mail
data:
default.conf: |
server {
listen [::]:80 default_server;
server_name _;
root /var/www/html;
location / {
try_files $uri /index.php$is_args$args;
}
location ~ \.php(/|$) {
try_files $uri =404;
fastcgi_pass roundcubemail:9000;
fastcgi_read_timeout 300;
proxy_read_timeout 300;
fastcgi_split_path_info ^(.+\.php)(/.*)$;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
fastcgi_param DOCUMENT_ROOT $realpath_root;
internal;
}
client_max_body_size 6m;
error_log /var/log/nginx/error.log;
access_log /var/log/nginx/access.log;
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: roundcubedb
  namespace: mail
  labels:
    service: roundcubedb
spec:
  replicas: 1
  selector:
    matchLabels:
      service: roundcubedb
  # Recreate: the Postgres data directory must never be mounted by two
  # pods at once during a rollout.
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        service: roundcubedb
    spec:
      containers:
        - name: roundcubedb
          # NOTE(review): "postgres:alpine" floats across major versions; a
          # repull after a major release leaves an incompatible data dir on
          # the PVC. Pin a major (e.g. postgres:16-alpine) — confirm the
          # currently-deployed data version before pinning.
          image: postgres:alpine
          # Was imagePullPolicy: "" — empty string merely relies on API
          # defaulting; state the (identical) default explicitly.
          imagePullPolicy: IfNotPresent
          env:
            - name: POSTGRES_DB
              value: roundcube
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: roundcubemail-shared-secret
                  key: DB_USER
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: roundcubemail-shared-secret
                  key: DB_PASSWORD
          ports:
            - containerPort: 5432
          volumeMounts:
            - mountPath: /var/lib/postgresql/data
              name: roundcubemail-db
      restartPolicy: Always
      volumes:
        - name: roundcubemail-db
          persistentVolumeClaim:
            claimName: roundcubemail-db
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: roundcubemail
namespace: mail
labels:
service: roundcubemail
spec:
replicas: 1
selector:
matchLabels:
service: roundcubemail
strategy:
type: Recreate
template:
metadata:
labels:
service: roundcubemail
spec:
containers:
- name: roundcubemail
image: roundcube/roundcubemail:1.6.1-fpm-alpine
imagePullPolicy: ""
env:
- name: ROUNDCUBEMAIL_DB_TYPE
value: pgsql
- name: ROUNDCUBEMAIL_DB_HOST
value: roundcubedb
- name: ROUNDCUBEMAIL_DB_NAME
value: roundcube
- name: ROUNDCUBEMAIL_DB_USER
valueFrom:
secretKeyRef:
name: roundcubemail-shared-secret
key: DB_USER
- name: ROUNDCUBEMAIL_DB_PASSWORD
valueFrom:
secretKeyRef:
name: roundcubemail-shared-secret
key: DB_PASSWORD
- name: ROUNDCUBEMAIL_DES_KEY
valueFrom:
secretKeyRef:
name: roundcubemail-shared-secret
key: DES_KEY
- name: ROUNDCUBEMAIL_DEFAULT_HOST
value: tls://imap.undercloud.cf.
- name: ROUNDCUBEMAIL_SMTP_SERVER
value: tls://smtp.undercloud.cf.
- name: ROUNDCUBEMAIL_SKIN
value: elastic
- name: ROUNDCUBEMAIL_PLUGINS
value: archive,zipdownload,newmail_notifier
ports:
- containerPort: 9000
volumeMounts:
- mountPath: /var/www/html
name: www-data
- mountPath: /tmp/roundcube-temp
name: temp-data
restartPolicy: Always
# serviceAccountName: ""
volumes:
- name: www-data
persistentVolumeClaim:
claimName: roundcubemail-www
- name: temp-data
persistentVolumeClaim:
claimName: roundcubemail-temp
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: roundcubenginx
namespace: mail
labels:
app: roundcubenginx
spec:
replicas: 1
selector:
matchLabels:
app: roundcubenginx
strategy:
type: Recreate
template:
metadata:
labels:
app: roundcubenginx
spec:
containers:
- name: roundcubenginx
image: nginx
imagePullPolicy: IfNotPresent
env:
- name: NGINX_HOST
value: localhost
- name: NGINX_PHP_CGI
value: roundcubemail:9000
ports:
- containerPort: 80
volumeMounts:
- name: www-data
mountPath: /var/www/html
- name: nginx-config
mountPath: /etc/nginx/conf.d/default.conf
subPath: default.conf
restartPolicy: Always
serviceAccountName: ""
volumes:
- name: www-data
persistentVolumeClaim:
claimName: roundcubemail-www
- name: nginx-config
configMap:
name: roundcubenginx-config
---
apiVersion: v1
kind: Service
metadata:
name: roundcubedb
namespace: mail
labels:
service: roundcubedb
spec:
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
type: ClusterIP
ports:
- port: 5432
targetPort: 5432
protocol: TCP
selector:
service: roundcubedb
---
apiVersion: v1
kind: Service
metadata:
name: roundcubemail
namespace: mail
labels:
service: roundcubemail
spec:
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
type: ClusterIP
ports:
- port: 9000
targetPort: 9000
protocol: TCP
selector:
service: roundcubemail
---
apiVersion: v1
kind: Service
metadata:
name: roundcubenginx
namespace: mail
labels:
app: roundcubenginx
spec:
ipFamilies:
- IPv6
ipFamilyPolicy: SingleStack
type: ClusterIP
ports:
- name: http
port: 80
targetPort: 80
selector:
app: roundcubenginx

20
mail/secrets.yaml Normal file
View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
  name: mailserver-ldap
  namespace: mail
type: Opaque
data:
  # NOTE(review): base64 is an encoding, not encryption — this LDAP bind
  # password is effectively committed to VCS in plaintext. Rotate it and
  # source it from a secret manager (SealedSecrets/SOPS/external-secrets).
  pw: c2VjdXJlUFdtYWlsc2VydmVy
---
# A duplicated "---" separator here previously created an empty YAML
# document between the two Secrets; it has been collapsed to one.
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: roundcubemail-shared-secret
  namespace: mail
stringData:
  # NOTE(review): plaintext credentials committed to VCS — rotate and
  # externalize, same as the Secret above.
  DES_KEY: 'a-super-random-value56'
  DB_USER: roundcube
  DB_PASSWORD: roundcubePwd2

17
matrix/README.md Normal file
View File

@@ -0,0 +1,17 @@
# Matrix
## Synapse + Element
Matrix is an open standard and communication protocol for real-time communication.
### Synapse
Synapse is an open-source Matrix homeserver written and maintained by the Matrix.org Foundation. We began rapid development in 2014, reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues in earnest today.
### Element
Element is a Matrix-based end-to-end encrypted messenger and secure collaboration app
improvements:
metrics
resource limits
email
enable capcha

140
matrix/backupSchedule.yaml Normal file
View File

@@ -0,0 +1,140 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: matrix-backup-csi-hourly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron: hourly, on the hour, from 15:00 through 22:00.
  schedule: "0 15-22 * * *"
  # With OwnerReferences set, backups created by this Schedule are
  # deleted together with the Schedule itself.
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - matrix
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 8h0m0s # keep 8 hours
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: matrix-backup-csi-daily
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron: daily at midnight (previous comment incorrectly said "every hour").
  schedule: "0 0 * * *"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - matrix
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 168h0m0s # keep 7 days
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: matrix-backup-csi-weekly
  namespace: velero
  labels:
    velero.io/storage-location: ceph-bucket
spec:
  # Cron: weekly, Monday at midnight.
  schedule: "0 0 * * 1"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    defaultVolumesToFsBackup: false
    hooks: {}
    includedNamespaces:
      - matrix
    metadata: {}
    storageLocation: ceph-bucket
    ttl: 730h0m0s # keep ~30 days
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: matrix-backup-restic-daily
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron: daily at midnight.
  schedule: "0 0 * * *"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    # File-system (restic) backup instead of CSI snapshots for this location.
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - matrix
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 168h0m0s # keep 7 days
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: matrix-backup-restic-weekly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron: weekly, Monday at midnight.
  schedule: "0 0 * * 1"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - matrix
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 730h0m0s # keep ~30 days
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: matrix-backup-restic-monthly
  namespace: velero
  labels:
    velero.io/storage-location: aux-balancer-minio
spec:
  # Cron: monthly, on the 1st at midnight.
  schedule: "0 0 1 * *"
  useOwnerReferencesInBackup: true
  template:
    csiSnapshotTimeout: 10m0s
    snapshotVolumes: false
    defaultVolumesToFsBackup: true
    hooks: {}
    includedNamespaces:
      - matrix
    metadata: {}
    storageLocation: aux-balancer-minio
    ttl: 4380h0m0s # keep ~6 months

226
matrix/db.yaml Normal file
View File

@@ -0,0 +1,226 @@
# CephFS-backed StorageClass dedicated to the matrix database PVC.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: matrix-db
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: matrix
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: matrix-replicated
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
# NOTE(review): Delete removes the backing CephFS volume when the PVC is
# deleted — for a database volume consider Retain to survive accidental
# PVC deletion (Velero schedules above are the only other safety net).
reclaimPolicy: Delete
---
# Data volume for the PostgreSQL server ("db" Deployment mounts it at
# /var/lib/postgresql/data).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: db
  namespace: matrix
spec:
  accessModes:
  # RWX is what CephFS provides; the database itself must still have only
  # a single writer pod at a time.
  - ReadWriteMany
  resources:
    requests:
      storage: 4Gi
  storageClassName: matrix-db
---
# PostgreSQL server backing Synapse. Credentials come from the "matrix-db"
# Secret (keys: username, user.pw, root.pw).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: db
  namespace: matrix
  labels:
    app: db
spec:
  # Exactly one replica: the data directory must never have two concurrent
  # PostgreSQL writers, even though the PVC is ReadWriteMany.
  replicas: 1
  selector:
    matchLabels:
      app: db
  template:
    metadata:
      labels:
        app: db
    spec:
      containers:
      - name: db
        # NOTE(review): untagged image floats to :latest — pin a major version
        # (e.g. postgres:15) so a pod reschedule cannot silently upgrade the DB.
        image: postgres
        imagePullPolicy: "IfNotPresent"
        ports:
        # Port was previously named "mysql", which was misleading for a
        # PostgreSQL container. The Service targets port 5432 numerically,
        # so renaming the port is safe for traffic routing.
        - name: postgres
          containerPort: 5432
        env:
        # MARIADB_ROOT_PASSWORD was removed: the postgres image ignores that
        # variable, and it needlessly exposed the root password inside the
        # container environment.
        - name: POSTGRES_USER
          valueFrom:
            secretKeyRef:
              name: matrix-db
              key: username
              optional: false
        - name: POSTGRES_PASSWORD
          valueFrom:
            secretKeyRef:
              name: matrix-db
              key: user.pw
              optional: false
        - name: POSTGRES_DB
          value: synapse
        # C collation + UTF8 encoding as required by Synapse.
        - name: POSTGRES_INITDB_ARGS
          value: "--lc-collate=C --lc-ctype=C --encoding=UTF8"
        # The probes previously commented out here used "mysqladmin", which
        # does not exist in the postgres image. If probes are re-enabled,
        # use pg_isready instead, e.g.:
        #readinessProbe:
        #  exec:
        #    command: ["sh", "-c", "exec pg_isready -U \"$POSTGRES_USER\" -d \"$POSTGRES_DB\""]
        #  initialDelaySeconds: 30
        #  periodSeconds: 10
        #livenessProbe:
        #  exec:
        #    command: ["sh", "-c", "exec pg_isready -U \"$POSTGRES_USER\" -d \"$POSTGRES_DB\""]
        #  initialDelaySeconds: 120
        #  periodSeconds: 10
        volumeMounts:
        - mountPath: /var/lib/postgresql/data
          name: data
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: db
          readOnly: false
---
# ClusterIP service exposing PostgreSQL to the matrix namespace as "db:5432".
apiVersion: v1
kind: Service
metadata:
  name: db
  namespace: matrix
spec:
  internalTrafficPolicy: Cluster
  # NOTE(review): IPv6-only single stack, while the element Service in this
  # repo is dual-stack — confirm every DB client resolves/connects over IPv6.
  ipFamilies:
  - IPv6
  ipFamilyPolicy: SingleStack
  ports:
  - name: postgres
    port: 5432
    protocol: TCP
    targetPort: 5432
  selector:
    app: db
  sessionAffinity: None
  type: ClusterIP
---
# CephFS-backed StorageClass for the database-dump volume.
# NOTE(review): parameters are identical to the "matrix-db" StorageClass
# (same fsName and pool); the duplicate class exists only to give the backup
# PVC its own class name.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: matrix-db-backup
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: matrix
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: matrix-replicated
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
# NOTE(review): Delete removes backup data together with the PVC; Retain
# would be the safer policy for a volume that holds database dumps.
reclaimPolicy: Delete
---
# Volume receiving the periodic pg_dump files (mounted at /backups by the
# db-backup Deployment).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: db-backup
  namespace: matrix
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      # NOTE(review): same size as the data PVC (4Gi); with 7 retained daily
      # dumps this may fill up — TODO confirm sizing against dump size.
      storage: 4Gi
  storageClassName: matrix-db-backup
---
# Periodic logical backup of the synapse database: the postgres-backup-local
# image runs pg_dump on the given SCHEDULE and prunes old dumps under /backups.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: db-backup
  namespace: matrix
  labels:
    app: db-backup
spec:
  replicas: 1
  selector:
    matchLabels:
      app: db-backup
  template:
    metadata:
      labels:
        app: db-backup
    spec:
      containers:
      - name: db-backup
        # NOTE(review): untagged image floats to :latest — consider pinning a tag.
        image: prodrigestivill/postgres-backup-local
        imagePullPolicy: "IfNotPresent"
        env:
        - name: SCHEDULE
          value: "@daily"
        - name: BACKUP_KEEP_DAYS
          value: "7"
        - name: POSTGRES_DB
          value: "synapse"
        # Connects to the "db" Service in the same namespace.
        - name: POSTGRES_HOST
          value: db
        - name: POSTGRES_PASSWORD
          valueFrom:
            secretKeyRef:
              name: matrix-db
              # Was "root.pw": the db Deployment provisions the "username"
              # account with the "user.pw" secret key, so a root.pw password
              # would fail authentication for that user. Keys now match.
              key: user.pw
        - name: POSTGRES_USER
          valueFrom:
            secretKeyRef:
              name: matrix-db
              key: username
              optional: false
        - name: HEALTHCHECK_PORT
          value: "8080"
        volumeMounts:
        - mountPath: /backups
          name: backup
      volumes:
      - name: backup
        persistentVolumeClaim:
          claimName: db-backup
          readOnly: false

130
matrix/element.yaml Normal file
View File

@@ -0,0 +1,130 @@
# Element Web client configuration, mounted into the element pod as
# /app/config.json. The value of config.json is raw JSON consumed by
# Element at runtime — do not add comments inside the block scalar.
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-element
  namespace: matrix
data:
  # NOTE(review): map_style_url embeds a MapTiler API key in a ConfigMap
  # committed to VCS — confirm this key is meant to be public.
  config.json: |
    {
      "default_server_config": {
        "m.homeserver": {
          "base_url": "https://matrix.undercloud.cf:443",
          "server_name": "matrix.undercloud.cf"
        },
        "m.identity_server": {
          "base_url": "https://vector.im"
        }
      },
      "disable_custom_urls": false,
      "disable_guests": false,
      "disable_login_language_selector": false,
      "disable_3pid_login": false,
      "brand": "Undercloud Communication",
      "integrations_ui_url": "https://scalar.vector.im/",
      "integrations_rest_url": "https://scalar.vector.im/api",
      "integrations_widgets_urls": [
        "https://scalar.vector.im/_matrix/integrations/v1",
        "https://scalar.vector.im/api",
        "https://scalar-staging.vector.im/_matrix/integrations/v1",
        "https://scalar-staging.vector.im/api",
        "https://scalar-staging.riot.im/scalar/api"
      ],
      "bug_report_endpoint_url": "https://element.io/bugreports/submit",
      "uisi_autorageshake_app": "element-auto-uisi",
      "default_country_code": "GB",
      "show_labs_settings": true,
      "features": {},
      "default_federate": true,
      "default_theme": "dark",
      "room_directory": {
        "servers": ["matrix.org","matrix.undercloud.cf"]
      },
      "enable_presence_by_hs_url": {
        "https://matrix.org": false,
        "https://matrix-client.matrix.org": false,
        "https://matrix.undercloud.cf": true
      },
      "setting_defaults": {
        "breadcrumbs": true
      },
      "jitsi": {
        "preferred_domain": "jitsi.undercloud.cf"
      },
      "element_call": {
        "url": "https://call.element.io",
        "participant_limit": 8,
        "brand": "Element Call"
      },
      "map_style_url": "https://api.maptiler.com/maps/streets/style.json?key=fU3vlMsMn4Jb6dnEIFsx"
    }
---
# Element Web front-end (static web app served on port 80).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: element
  namespace: matrix
  labels:
    app: element
spec:
  replicas: 1
  selector:
    matchLabels:
      app: element
  template:
    metadata:
      labels:
        app: element
    spec:
      containers:
      - name: element
        # Pinned tag — good; bump deliberately when upgrading Element.
        image: vectorim/element-web:v1.11.20
        imagePullPolicy: IfNotPresent
        # Probes hit the app root over the named container port.
        readinessProbe:
          httpGet:
            path: /
            port: element
          initialDelaySeconds: 2
          periodSeconds: 3
        livenessProbe:
          httpGet:
            path: /
            port: element
          initialDelaySeconds: 10
          periodSeconds: 10
        ports:
        - containerPort: 80
          name: element
        volumeMounts:
        # subPath mount replaces only config.json inside the image's /app dir.
        # NOTE: subPath-mounted ConfigMap files are not updated live; the pod
        # must be restarted to pick up ConfigMap changes.
        - mountPath: "/app/config.json"
          name: config-element
          subPath: config.json
      volumes:
      - name: config-element
        configMap:
          name: config-element
          items:
          - key: "config.json"
            path: "config.json"
---
# ClusterIP service in front of the Element Web pods (port 80, dual stack).
apiVersion: v1
kind: Service
metadata:
  name: element
  namespace: matrix
  labels:
    app: element
spec:
  internalTrafficPolicy: Cluster
  # Dual-stack, preferring IPv6 (listed first).
  ipFamilies:
  - IPv6
  - IPv4
  ipFamilyPolicy: PreferDualStack
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: element
  sessionAffinity: None
  type: ClusterIP

42
matrix/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
# CephFS filesystem "matrix" — the backing store for the matrix-db and
# matrix-db-backup StorageClasses (pool "matrix-replicated").
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: matrix
  namespace: rook-ceph
spec:
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  dataPools:
  - name: replicated
    failureDomain: host
    replicated:
      size: 3
  # NOTE(review): false means deleting this CR destroys the filesystem and
  # all data on it — consider true for a filesystem holding DB volumes.
  preserveFilesystemOnDelete: false
  metadataServer:
    # One active MDS plus a hot standby.
    activeCount: 1
    activeStandby: true
    placement:
    #  nodeAffinity:
    #    requiredDuringSchedulingIgnoredDuringExecution:
    #      nodeSelectorTerms:
    #      - matchExpressions:
    #        - key: role
    #          operator: In
    #          values:
    #          - mds-node
      # Allow the MDS pods onto tainted storage nodes.
      tolerations:
      - key: node-role.kubernetes.io/storage-node
        operator: Exists
        effect: NoSchedule
    #  podAffinity:
    #  podAntiAffinity:
    #  topologySpreadConstraints:
    #resources:
    #  limits:
    #    cpu: "80m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"

Some files were not shown because too many files have changed in this diff Show More