commit 874d7c1df4
2026-03-15 19:47:19 +00:00
372 changed files with 230842 additions and 0 deletions

.DS_Store (binary, vendored, new file; content not shown)

README.md (new file, 140 lines)

@@ -0,0 +1,140 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: backup-csi-hourly
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
schedule: 0 15-22 * * * # on the hour, 15:00-22:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Note: if set to true, deleting this Schedule also deletes the backups it created. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- demo
metadata: {}
storageLocation: ceph-bucket
ttl: 8h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: backup-csi-daily
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
schedule: 0 0 * * * # daily at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Note: if set to true, deleting this Schedule also deletes the backups it created. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- demo
metadata: {}
storageLocation: ceph-bucket
ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: backup-csi-weekly
namespace: velero
labels:
velero.io/storage-location: ceph-bucket
spec:
# Schedule is a Cron expression defining when to run the Backup
schedule: 0 0 * * 1 # weekly, Mondays at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Note: if set to true, deleting this Schedule also deletes the backups it created. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
defaultVolumesToFsBackup: false
hooks: {}
includedNamespaces:
- demo
metadata: {}
storageLocation: ceph-bucket
ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: backup-restic-daily
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
schedule: 0 0 * * * # daily at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Note: if set to true, deleting this Schedule also deletes the backups it created. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- demo
metadata: {}
storageLocation: aux-balancer-minio
ttl: 168h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: backup-restic-weekly
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
schedule: 0 0 * * 1 # weekly, Mondays at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Note: if set to true, deleting this Schedule also deletes the backups it created. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- demo
metadata: {}
storageLocation: aux-balancer-minio
ttl: 730h0m0s
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: backup-restic-monthly
namespace: velero
labels:
velero.io/storage-location: aux-balancer-minio
spec:
# Schedule is a Cron expression defining when to run the Backup
schedule: 0 0 1 * * # monthly, on the 1st at 00:00
# Specifies whether to use OwnerReferences on backups created by this Schedule.
# Note: if set to true, deleting this Schedule also deletes the backups it created. Optional.
useOwnerReferencesInBackup: true
template:
csiSnapshotTimeout: 10m0s
snapshotVolumes: false
defaultVolumesToFsBackup: true
hooks: {}
includedNamespaces:
- demo
metadata: {}
storageLocation: aux-balancer-minio
ttl: 4380h0m0s
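
These schedules land in the velero namespace once synced. A quick way to check them is the velero CLI; a minimal sketch, assuming velero is installed and pointed at this cluster:

velero -n velero schedule get                                    # list schedules, cron, TTL
velero -n velero backup get                                      # list backups the schedules produced
velero -n velero backup create --from-schedule backup-csi-daily  # trigger one run by hand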

app-of-apps/argocd.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argocd
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: argocd
server: https://kubernetes.default.svc
project: default
source:
path: argocd
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/bookstack.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: bookstack
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: bookstack
server: https://kubernetes.default.svc
project: default
source:
path: bookstack
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/calico.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: calico
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: calico
server: https://kubernetes.default.svc
project: default
source:
path: calico-config
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/ceph.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ceph
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
#namespace: ceph
server: https://kubernetes.default.svc
project: default
source:
path: ceph
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/cert-manager.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: cert-manager
server: https://kubernetes.default.svc
project: default
source:
path: cert-manager
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/code-server.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: code-server
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: code-server
server: https://kubernetes.default.svc
project: default
source:
path: code-server
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/demo.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: demo
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: demo
server: https://kubernetes.default.svc
project: default
source:
path: demo
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/dns.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: dns
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: dns
server: https://kubernetes.default.svc
project: default
source:
path: dns
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/fileserver.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: fileserver
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: fileserver
server: https://kubernetes.default.svc
project: default
source:
path: fileserver
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/forum.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: forum
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: forum
server: https://kubernetes.default.svc
project: default
source:
path: forum
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/gitea.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: gitea
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: gitea
server: https://kubernetes.default.svc
project: default
source:
path: gitea
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/grafana.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: grafana
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: grafana
server: https://kubernetes.default.svc
project: default
source:
path: grafana
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/guacamole.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: guacamole
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: guacamole
server: https://kubernetes.default.svc
project: default
source:
path: guacamole
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/homer.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: homer
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: homer
server: https://kubernetes.default.svc
project: default
source:
path: homer
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/ingress-external.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ingress-external
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: ingress-external
server: https://kubernetes.default.svc
project: default
source:
path: ingress-external-devices
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/ingress.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ingress
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: ingress-nginx
server: https://kubernetes.default.svc
project: default
source:
path: ingress-nginx
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/jellyfin.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: jellyfin
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: jellyfin
server: https://kubernetes.default.svc
project: default
source:
path: jellyfin
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/jitsi.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: jitsi
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: jitsi
server: https://kubernetes.default.svc
project: default
source:
path: jitsi
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/kube-metrics.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kube-metrics
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
#namespace: kube-metrics
server: https://kubernetes.default.svc
project: default
source:
path: kube-metrics
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/kube-state-metrics.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kube-state-metrics
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
#namespace: kube-state-metrics
server: https://kubernetes.default.svc
project: default
source:
path: kube-state-metrics
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/kubevirt.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kubevirt
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: kubevirt
server: https://kubernetes.default.svc
project: default
source:
path: kubevirt
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/logging.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: logging
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: logging
server: https://kubernetes.default.svc
project: default
source:
path: logging
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/loki.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: loki
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: loki
server: https://kubernetes.default.svc
project: default
source:
path: loki
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/mail.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mail
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: mail
server: https://kubernetes.default.svc
project: default
source:
path: mail
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/matrix.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: matrix
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: matrix
server: https://kubernetes.default.svc
project: default
source:
path: matrix
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/nextcloud.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: nextcloud
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: nextcloud
server: https://kubernetes.default.svc
project: default
source:
path: nextcloud
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/openldap.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: openldap
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: openldap
server: https://kubernetes.default.svc
project: default
source:
path: openldap
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/paperless.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: paperless
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: paperless
server: https://kubernetes.default.svc
project: default
source:
path: paperless
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/polly.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: polly
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: polly
server: https://kubernetes.default.svc
project: default
source:
path: polly
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/portainer.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: portainer
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: portainer
server: https://kubernetes.default.svc
project: default
source:
path: portainer
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/snapshots.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: snapshots
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
#namespace: snapscheduler
server: https://kubernetes.default.svc
project: default
source:
path: snapshots
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/sshwifty.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: sshwifty
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: sshwifty
server: https://kubernetes.default.svc
project: default
source:
path: sshwifty
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/vaultwarden.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vaultwarden
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: vaultwarden
server: https://kubernetes.default.svc
project: default
source:
path: vaultwarden
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/velero-ui.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: velero-ui
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: velero-ui
server: https://kubernetes.default.svc
project: default
source:
path: velero-ui
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/velero.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: velero
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: velero
server: https://kubernetes.default.svc
project: default
source:
path: velero
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/victoria-monitoring.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: victoria-monitoring
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: vm
server: https://kubernetes.default.svc
project: default
source:
path: victoria-monitoring
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

app-of-apps/wordpress.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: wordpress
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: wordpress
server: https://kubernetes.default.svc
project: default
source:
path: wordpress
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

argocd/.DS_Store (binary, vendored, new file; content not shown)

argocd/README.md (new file, 7 lines)

@@ -0,0 +1,7 @@
This is the install manifest from the Argo CD quickstart guide.
I have added control-plane tolerations to all deployments so Argo CD can come up during bootstrap, when the master node is the only node in the k8s cluster.
Not working:
LDAP login
LDAP login restricted to admins
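
The toleration added to each deployment is roughly of this shape; a sketch, assuming the standard control-plane taint keys:

tolerations:
- key: node-role.kubernetes.io/control-plane
  operator: Exists
  effect: NoSchedule
- key: node-role.kubernetes.io/master
  operator: Exists
  effect: NoSchedule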

argocd/apps.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-of-apps
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: app-of-apps
server: https://kubernetes.default.svc
project: default
source:
path: app-of-apps
repoURL: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
targetRevision: HEAD

argocd/argocd-secret.yaml (new file, 15 lines)

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
namespace: argocd
labels:
app.kubernetes.io/name: argocd-secret
app.kubernetes.io/part-of: argocd
type: Opaque
data:
admin.password: JDJhJDEwJHhpRFAzcHZsNmdrNzlNUEpUZU12aHVSVHR5REppWXVZZUN3eXBIenpqcmpRRkMxV0NrUkVL
admin.passwordMtime: MjAyMy0wMS0wOVQxNTo0MDoxMFo=
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURZakNDQWtxZ0F3SUJBZ0lRRU92UHJkOTZLTzVrbnFNQzJIK2dhakFOQmdrcWhraUc5dzBCQVFzRkFEQVMKTVJBd0RnWURWUVFLRXdkQmNtZHZJRU5FTUI0WERUSXpNREV3T1RFNE1qRTBObG9YRFRJME1ERXdPVEU0TWpFMApObG93RWpFUU1BNEdBMVVFQ2hNSFFYSm5ieUJEUkRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDCkFRb0NnZ0VCQUszeTdhSkVpRVhqcXZRdWRwWE5EYXdKM2lhT0xHUjRLMk9OMll6WWxvUTBoNFR5V0tlbXMzdkwKK1VWd2UxZGMxL1pMc1hxbk9wYkI3T2hySjhKRTQ5N2NlTk4xc25vTHdpejlYRHJxTnA2MGZJYkc1Vnh2QW9jagpJU3NBVTBkamJhc0pmMlpTRkpSYktKYXZvTWhjbFcyanpsU0U1djhvY1JReGZ5ekVQQ01DZDFCQVdHZE9RUUZmCmxxTzJ5SmVDRHZ4clgvWlR2ZXFUL0pjTzdwYWNWTnNYc2Q3QTlrUnF0b1l6bWxMQVpkNHQ2NFZoYTJXazZzZDgKRzZNakdjN043S05xZ09nb21tczNHYzR3b0NWVGlhRnltcDNlTXFpZEtUSTBGSmZtM1Q4cmphY2l2L0RyV0ZldApyUHM4MEZwL3FWaGNaQXpTSzVvcFVVYWJVK0ZMd1lFQ0F3RUFBYU9Cc3pDQnNEQU9CZ05WSFE4QkFmOEVCQU1DCkJhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0V3REFZRFZSMFRBUUgvQkFJd0FEQjdCZ05WSFJFRWREQnkKZ2dsc2IyTmhiR2h2YzNTQ0RXRnlaMjlqWkMxelpYSjJaWEtDRkdGeVoyOWpaQzF6WlhKMlpYSXVZWEpuYjJOawpnaGhoY21kdlkyUXRjMlZ5ZG1WeUxtRnlaMjlqWkM1emRtT0NKbUZ5WjI5alpDMXpaWEoyWlhJdVlYSm5iMk5rCkxuTjJZeTVqYkhWemRHVnlMbXh2WTJGc01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQkticzQva0dhSnNSdnUKT3VRSzRMSFh0bk5oZmdPYXV2VVVnUUU1TmFGYXBkVVJMME1kMDVCQkVkanZjNWp6QVlRd1UzNmFjQSs3L1l0NQpDTFVhQUc0dDZhZ0hrT2tRZ28zbEgxZmxoekFVOXRSanMvU3NyWEJMdVhWVnBibWtNQ0h5VmNCRUx2Q2wyU1RECm5aRkJWUi9ldEozOTBtMVhEOVpqcFh4cUc3V1hjeWxsd1BmVm1oWktLQ3FlRTFZUTVnQnRsZDFuQm9PZDI2NmIKb2ZzbFdVRkdHbHNjUXBMRGthb3p6bUUwWkUveUVBZ3RqRUhwVWlxZWtBMzY2Zm1wWC8rMmJ5ZU1mcENybjZwcApVSUVSVE92a09RWnlmTHRweFpvVis2NmdJYmpWZzhjanlsSVZFWUJURTB6QVZDZVNORlpabml4YXdZaFIvL1VwCjhGclVVRlIyCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBcmZMdG9rU0lSZU9xOUM1MmxjME5yQW5lSm80c1pIZ3JZNDNaak5pV2hEU0hoUEpZCnA2YXplOHY1UlhCN1Yxelg5a3V4ZXFjNmxzSHM2R3Nud2tUajN0eDQwM1d5ZWd2Q0xQMWNPdW8ybnJSOGhzYmwKWEc4Q2h5TWhLd0JUUjJOdHF3bC9abElVbEZzb2xxK2d5RnlWYmFQT1ZJVG0veWh4RkRGL0xNUThJd0ozVUVCWQpaMDVCQVYrV283YklsNElPL0d0ZjlsTzk2cFA4bHc3dWxweFUyeGV4M3NEMlJHcTJoak9hVXNCbDNpM3JoV0ZyClphVHF4M3dib3lNWnpzM3NvMnFBNkNpYWF6Y1p6akNnSlZPSm9YS2FuZDR5cUowcE1qUVVsK2JkUHl1TnB5Sy8KOE90WVY2MnMrenpRV24rcFdGeGtETklybWlsUlJwdFQ0VXZCZ1FJREFRQUJBb0lCQUFsRURucDlVaEQrL3FWQgpNRm5zY1ZUL3RaQ3lOQlVwL1kyeENheWlBT3FMN2NnY0xMTEpnM2dOdG5xSGNscUYvSThIR3k0Z2FGT05ndlFpCmVSeTFGTHBUWGpCTmJiTm9tdkJRa3JBa3Q4SWVkUitzeXB6dzRONlg5WFZYOEJlMmFTb2FPR29YNXphNzRGNmUKZmw4TjUzdk9wUGdGbUZSeE1BeldVaUx0VlJLOFFXbEpSbC9YUGc3YVphRS9pTjh0QlJQWHNybS8welNaNTBDSgpYYmtMaEpHcDZVcXJENVkydHIrVk5FWWlnaGtMS21UaWplcmYySFN0c1lHR2hTZUJocTJTS1RMa0tsSHFlZDJiCnRDUXRTZi9wc0UyODlMeVNsZXo4U0d3dzU2RXpkTVlGNzA4cThQd2FVQ3NKMmx3YUF5OGJqYUNHYU9tMFBCYXgKSlZxaUFrMENnWUVBMFcyZHVGUmRQN0dmK2pyQnNxNFJINGxnQ1hBNVhTd3U2SU9GZnphM3ZvUU5LYWdxWEIwRwpCOFhhdmpscWRUcEdtK29rQTYxcVFjbHozRU0vRGlVbi9LZENxamVZaXlsREt0YUJ4MmMzVjV1cXkvSzNoS3NJCnczbndOVDdxdFJ1MnFxYmE2amZLVEZUQjFMYXN6KzRNK1hieUpkS1Bucnk3QWRrclNnd2Y3QWNDZ1lFQTFLR0sKT3hhUUVZTEErQWJtdEM2c2lRUDJzTEJOTXl4MUUwK015ZzR2SHN2bGJmM056Vkd1K0ZxMmxudUsza1JWV1czWgorM04yTGxmL3VrTVIwKzNMNk4xZVJIYUg1NEdMMGFEMnFBOHU0Y2gvajFvc2xhM1lSQVRPempvRDNxY1VoL1l1CnFHV1dFcHVtRmpVRHcwUy9senQ4T280bC9nSEszcVl0MFJja2xEY0NnWUFFUmNFMjlubWtpQUlrZjdoZDVkRXgKbklMYURuRU5KbzZhYmJ4MmVPNU1zN0wxQzVvNW9ObnpwS1N1eHNzV015Y25uU0k0OVB1Njlkb29QekwxSGVydgo2NVdmbFZ6R1VnUXR4b3lGUGVUU0k3bmJTVXRYS0lvWU9zK2N0bmpoVXZ4cUJOZG9lZFRsczhEMGp2bDlrSFN4ClRpem9lcUExYzlJNDJtNVlwMkNyVndLQmdFTnNPYlhFME1nTXF2MjZ5bnJGOWdXNFVRZnp6M2J6bU9nWHFIQUYKa1dGMWkvbVZxaVY1eWo2TUhTdVlYdzlkb2FhcTA1ME5IcU5SV2hDSTVlVkttUWJzOHRCSXVZMXFJWmpHTHBCWgp5Q2JsK2JUT0JpY3NLZEJmcWVmd3MzdHRoWEFiV3U0ZzBEWjBUblRxT0pnNWUzc0w3TGR3alpGK3BnSjF0VCtBCjFDTi9Bb0dBWGY2R1FqN2RRcnlVUFZ2ZW9pR2RFWFQycGx1RmJkZUdRNmNzd0FPc0V0Y2VvczR5cjFXWExrR1oKbm5lMzlOZFJtVURKYUlUb3RCcnl1SGIyRFdJeEpOMlZ3dkZ5RmxxdTBFM1p3cTIzL2hzdFF1VWpXdGJYZ0dtdwpiTlc0SnhXclBISGZBWFdWUGRUcGNpK2VTSTBCVnhpMFhNQkc5dDViNkY1Z1ViOStOdlE9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
dex.ldap.bindPW: YXJnb2Nkc2VjdXJlUFc=
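
admin.password is a bcrypt hash and, like every value under data:, base64-encoded on top. A sketch of producing replacement values, assuming the argocd CLI is available:

argocd account bcrypt --password 'new-admin-password' | tr -d '\n' | base64   # admin.password
date -u +%FT%TZ | tr -d '\n' | base64                                         # admin.passwordMtime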

argocd/ingress.yaml (new file, 26 lines)

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd
namespace: argocd
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
tls:
- hosts:
- argocd.apps.undercloud.dev
secretName: argocd-tls
rules:
- host: argocd.apps.undercloud.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 443

argocd/install.yaml (new file, 26833 lines; diff suppressed due to size)

argocd/namespace.yaml (new file, 6 lines)

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: argocd
labels:
prometheus: prometheus

argocd/patch-dex.yaml (new file, 38 lines)

@@ -0,0 +1,38 @@
apiVersion: v1
data:
# Leave as true until LDAP has been successfully set up; enables manual login
admin.enabled: "true"
dex.config: |
connectors:
- type: ldap
name: LDAP
id: ldap
config:
# Ldap server address (choice of port depends on your set-up, see docs)
host: "ldap.undercloud.local:389"
insecureNoSSL: true
insecureSkipVerify: true
startTLS: false
# The full DN of the service account used to bind and search the directory
bindDN: "cn=argocd,ou=serviceaccounts,ou=users,dc=undercloud,dc=local"
# "$dex.ldap.bindPW" references the bind password stored under that key in argocd-secret
bindPW: "$dex.ldap.bindPW"
usernamePrompt: username
# Ldap user search attributes
userSearch:
baseDN: "cn=users,dc=undercloud,dc=local"
filter: "(objectClass=inetOrgPerson)"
username: uid
idAttr: uid
emailAttr: mail
nameAttr: cn
groupSearch:
baseDN: "dc=undercloud,dc=local"
filter: "(objectClass=groupOfUniqueNames)"
userMatchers:
- userAttr: uid
groupAttr: uniqueMember
# Represents group name.
nameAttr: name
# This prevents the LDAP login flow from redirecting to itself.
url: argocd.apps.undercloud.dev
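
This manifest carries only data: keys, so it is presumably merged into the argocd-cm ConfigMap rather than applied on its own; a sketch under that assumption:

kubectl -n argocd patch configmap argocd-cm --type merge --patch-file argocd/patch-dex.yaml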

argocd/repo.yaml (new file, 42 lines)

@@ -0,0 +1,42 @@
#apiVersion: v1
#kind: Secret
#metadata:
# name: build-node-undercloud-infrastructure
# namespace: argocd
# labels:
# argocd.argoproj.io/secret-type: repository
#type: Opaque
#stringData:
# # URL of your Git repository
# url: https://git.undercloud.local/Undercloud/undercloud-infrastructure.git
# # Set "true" for plain HTTP or an untrusted certificate
# insecure: "true"
#---
#apiVersion: v1
#kind: Secret
#metadata:
# name: build-node-k8s-apps
# namespace: argocd
# labels:
# argocd.argoproj.io/secret-type: repository
#type: Opaque
#stringData:
# # URL of your Git repository
# url: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
# # Set "true" for plain HTTP or an untrusted certificate
# insecure: "true"
#---
apiVersion: v1
kind: Secret
metadata:
name: gitea-k8s-apps
namespace: argocd
labels:
argocd.argoproj.io/secret-type: repository
type: Opaque
stringData:
url: http://gitea.gitea.svc.k8s.undercloud.local:3000/Undercloud/k8s-apps.git
username: shodan
password: 4IsTheMindKiller
insecure: "true" # skip TLS verification / allow HTTP
enableLfs: "true" # Git LFS support
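
Once synced, the repository connection can be verified; a sketch, assuming the argocd CLI is logged in to this instance:

argocd repo list
kubectl -n argocd get secret -l argocd.argoproj.io/secret-type=repository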

(filename not shown; new file, 56 lines: ServiceMonitors for Argo CD metrics)

@@ -0,0 +1,56 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: argocd-metrics
namespace: argocd
labels:
team: undercloud
spec:
#namespaceSelector:
# matchNames:
# - argocd-metrics
selector:
matchLabels:
app.kubernetes.io/name: argocd-metrics
endpoints:
- port: metrics
#path: /metrics
interval: 5s
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: argocd-notifications-controller-metrics
namespace: argocd
labels:
team: undercloud
spec:
#namespaceSelector:
# matchNames:
# - argocd-metrics
selector:
matchLabels:
app.kubernetes.io/name: argocd-notifications-controller-metrics
endpoints:
- port: metrics
#path: /metrics
interval: 5s
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: argocd-server-metrics
namespace: argocd
labels:
team: undercloud
spec:
#namespaceSelector:
# matchNames:
# - argocd-metrics
selector:
matchLabels:
app.kubernetes.io/name: argocd-server-metrics
endpoints:
- port: metrics
#path: /metrics
interval: 5s
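
Whether the monitors were admitted can be checked from the cluster; a sketch, assuming the Prometheus Operator CRDs are present:

kubectl -n argocd get servicemonitors
kubectl -n argocd get svc -l app.kubernetes.io/name=argocd-metrics   # the Service the first monitor selects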

argocd/service.yaml (new file, 24 lines)

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: argocd-server
namespace: argocd
spec:
type: ClusterIP
clusterIP: 2001:470:7116:f:1::81
clusterIPs:
- 2001:470:7116:f:1::81
- 10.0.91.81
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: RequireDualStack
ports:
- name: http
port: 80
targetPort: 8080
- name: https
port: 443
targetPort: 8080
selector:
app.kubernetes.io/name: argocd-server

bookstack/.DS_Store (binary, vendored, new file; content not shown)

bookstack/README.md (new file, 19 lines)

@@ -0,0 +1,19 @@
# Bookstack
## Wiki
BookStack is a simple, self-hosted, easy-to-use platform for organising and storing information.
Default admin: admin@admin.com
Default password: password
Improvements:
One has to initialize the app with LDAP off to create the default admin, then switch to LDAP and rerun the deployment.
I would also like to set the groups and permissions automatically in the deployment.
Still to do: SMTP settings, metrics, liveness probes.

bookstack/adminer.yaml (new file, 47 lines)

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: adminer
namespace: bookstack
labels:
app: adminer
spec:
replicas: 1
selector:
matchLabels:
app: adminer
template:
metadata:
labels:
app: adminer
spec:
containers:
- name: adminer
image: adminer
imagePullPolicy: IfNotPresent
env:
- name: ADMINER_DEFAULT_SERVER
value: db
ports:
- containerPort: 8080
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: adminer
namespace: bookstack
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 8080
protocol: TCP
targetPort: 8080
selector:
app: adminer
sessionAffinity: None
type: ClusterIP

(filename not shown; new file, 90 lines: Velero Schedules for bookstack)

@@ -0,0 +1,90 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: bookstack-csi-hourly
namespace: velero
spec:
schedule: "0 15-22 * * *"
useOwnerReferencesInBackup: true
template:
includedNamespaces: ["bookstack"]
ttl: 8h
snapshotVolumes: true
defaultVolumesToFsBackup: false
csiSnapshotTimeout: 10m
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: bookstack-csi-daily
namespace: velero
spec:
schedule: "0 0 * * *"
useOwnerReferencesInBackup: true
template:
includedNamespaces: ["bookstack"]
ttl: 168h
snapshotVolumes: true
defaultVolumesToFsBackup: false
csiSnapshotTimeout: 10m
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: bookstack-csi-weekly
namespace: velero
spec:
schedule: "0 0 * * 1"
useOwnerReferencesInBackup: true
template:
includedNamespaces: ["bookstack"]
ttl: 730h
snapshotVolumes: true
defaultVolumesToFsBackup: false
csiSnapshotTimeout: 10m
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: bookstack-daily
namespace: velero
spec:
schedule: "30 2 * * *" # tous les jours 02:30
useOwnerReferencesInBackup: true
template:
includedNamespaces: [bookstack]
storageLocation: default
ttl: 336h # ~14 days
snapshotVolumes: false
defaultVolumesToFsBackup: true
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: bookstack-weekly
namespace: velero
spec:
schedule: "0 3 * * 0" # chaque dimanche 03:00
useOwnerReferencesInBackup: true
template:
includedNamespaces: [bookstack]
storageLocation: default
ttl: 1344h # ~8 weeks
snapshotVolumes: false
defaultVolumesToFsBackup: true
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: bookstack-monthly
namespace: velero
spec:
schedule: "0 4 1 * *" # 1er du mois 04:00
useOwnerReferencesInBackup: true
template:
includedNamespaces: [bookstack]
storageLocation: default
ttl: 8760h # ~12 months
snapshotVolumes: false
defaultVolumesToFsBackup: true

bookstack/bookstack.yaml (new file, 318 lines)

@@ -0,0 +1,318 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: env
namespace: bookstack
data:
# file-like keys
.env: |
#from configmap
# This file, when named as ".env" in the root of your BookStack install
# folder, is used for the core configuration of the application.
# By default this file contains the most common required options but
# a full list of options can be found in the '.env.example.complete' file.
# NOTE: If any of your values contain a space or a hash you will need to
# wrap the entire value in quotes. (eg. MAIL_FROM_NAME="BookStack Mailer")
# Use dark mode by default
# Will be overridden by any existing user/session preference.
APP_DEFAULT_DARK_MODE=true
# Application key
# Used for encryption where needed.
# Run `php artisan key:generate` to generate a valid key.
APP_KEY=base64:Gvel4j1kfhBBoT7aho5ibdozSkf7BwB/4vDfSbMTkiU=
# Application URL
# This must be the root URL that you want to host BookStack on.
# All URLs in BookStack will be generated using this value
# to ensure URLs generated are consistent and secure.
# If you change this in the future you may need to run a command
# to update stored URLs in the database. Command example:
# php artisan bookstack:update-url https://old.example.com https://new.example.com
APP_URL=https://bookstack.apps.undercloud.dev
# Database details
DB_HOST='db'
DB_PORT='3306'
DB_DATABASE='bookstack'
DB_USERNAME='bookstack'
DB_PASSWORD='verysecurePWDBbookstackbookstack'
# Mail system to use
# Can be 'smtp' or 'sendmail'
MAIL_DRIVER=smtp
# Mail sender details
MAIL_FROM_NAME="BookStack"
MAIL_FROM=bookstack@example.com
# SMTP mail options
# These settings can be checked using the "Send a Test Email"
# feature found in the "Settings > Maintenance" area of the system.
MAIL_HOST=localhost
MAIL_PORT=1025
MAIL_USERNAME=null
MAIL_PASSWORD=null
MAIL_ENCRYPTION=null
# General auth
# use standard login the first time to create the default admin
# then enable ldap
AUTH_METHOD=ldap
# AUTH_METHOD=standard
# The LDAP host, Adding a port is optional
#LDAP_SERVER=example.com:389
# If using LDAP over SSL you should also define the protocol:
LDAP_SERVER=ldaps://ldap.undercloud.local:636
# The base DN from where users will be searched within
LDAP_BASE_DN="ou=users,dc=undercloud,dc=local"
# The full DN and password of the user used to search the server
# Can both be left as 'false' (without quotes) to bind anonymously
LDAP_DN="cn=bookstack,ou=serviceaccounts,ou=users,dc=undercloud,dc=local"
LDAP_PASS="thisismysecureLDAPPWbookstack"
# A filter to use when searching for users
# The user-provided user-name used to replace any occurrences of '${user}'
# If you're setting this option via other means, such as within a docker-compose.yml,
# you may need to escape the $, often using $$ or \$ instead.
# Note: This option cannot be used with the docker-compose.yml `env_file` option.
#LDAP_USER_FILTER=(&(uid=${user}))
LDAP_USER_FILTER=(&(uid=${user})(memberOf=cn=users,ou=groups,dc=undercloud,dc=local))
# Set the LDAP version to use when connecting to the server
# Should be set to 3 in most cases.
LDAP_VERSION=3
# Set the property to use as a unique identifier for this user.
# Stored and used to match LDAP users with existing BookStack users.
# Prefixing the value with 'BIN;' will assume the LDAP service provides the attribute value as
# binary data and BookStack will convert the value to a hexadecimal representation.
# Defaults to 'uid'.
LDAP_ID_ATTRIBUTE=uid
# Set the default 'email' attribute. Defaults to 'mail'
LDAP_EMAIL_ATTRIBUTE=mail
# Set the property to use for a user's display name. Defaults to 'cn'
LDAP_DISPLAY_NAME_ATTRIBUTE=cn
# Set the attribute to use for the user's avatar image.
# Must provide JPEG binary image data.
# Will be used upon login or registration when the user doesn't
# already have an avatar image set.
# Remove this option or set to 'null' to disable LDAP avatar import.
LDAP_THUMBNAIL_ATTRIBUTE=jpegphoto
# Force TLS to be used for LDAP communication.
# Use this if you can but your LDAP support will need to support it and
# you may need to import your certificate to the BookStack host machine.
# Defaults to 'false'.
LDAP_START_TLS=false
# If you need to allow untrusted LDAPS certificates, add the below and uncomment (remove the #)
# Only set this option if debugging or you're absolutely sure it's required for your setup.
# If using php-fpm, you may want to restart it after changing this option to avoid instability.
LDAP_TLS_INSECURE=true
# If you need to debug the details coming from your LDAP server, add the below and uncomment (remove the #)
# Only set this option if debugging since it will block logins and potentially show private details.
#LDAP_DUMP_USER_DETAILS=true
# Enable LDAP group sync, Set to 'true' to enable.
LDAP_USER_TO_GROUPS=true
# LDAP user attribute containing groups, Defaults to 'memberOf'.
LDAP_GROUP_ATTRIBUTE="memberOf"
# Remove users from roles that don't match LDAP groups.
# Note: While this is enabled the "Default Registration Role", editable within the
# BookStack settings view, will be considered a matched role and assigned to the user.
LDAP_REMOVE_FROM_GROUPS=false
# If you need to debug the group details coming from your LDAP server, add the below and uncomment (remove the #).
# Only set this option if debugging since it will block logins and potentially show private details.
#LDAP_DUMP_USER_GROUPS=true
---
apiVersion: v1
kind: ConfigMap
metadata:
name: startup
namespace: bookstack
data:
startup.sh: |
#!/bin/sh
echo "startup..."
#if test ! -f "/config/startup.ran"; then
# touch /config/startup.ran
cp -f /mnt/.env /config/www/.env
#else
# echo "startup ran already!"
#fi
echo "startup done."
#exit 123
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: bookstack
namespace: bookstack
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 32Gi
storageClassName: cephfs-hyper
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: bookstack
namespace: bookstack
labels:
app: bookstack
spec:
replicas: 1
selector:
matchLabels:
app: bookstack
template:
metadata:
labels:
app: bookstack
spec:
dnsConfig:
options:
- name: ndots
value: "1"
initContainers:
#- name: copyappini
# image: linuxserver/bookstack
# command: ['/bin/startup.sh']
# volumeMounts:
# - mountPath: "/config"
# name: bookstack
# - mountPath: "/mnt/.env"
# name: env
# subPath: .env
# - mountPath: /bin/startup.sh
# name: startup
# subPath: startup.sh
containers:
- name: bookstack
image: linuxserver/bookstack
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
#lifecycle:
# postStart:
# exec:
# command:
# - "/bin/startup.sh"
#livenessProbe:
# httpGet:
# path: /status
# port: 80
# #httpHeaders:
# #- name: Custom-Header
# # value: Awesome
# initialDelaySeconds: 120
# periodSeconds: 10
env:
#- name: PUID
# value: "1000"
#- name: PGID
# value: "1000"
#- name: DB_HOST
# value: "db"
#- name: DB_PORT
# value: "3306"
#- name: APP_URL
# value: "https://bookstack.apps.undercloud.dev"
- name: DB_USER
valueFrom:
secretKeyRef:
name: bookstack-db
key: username
optional: false
- name: DB_PASS
valueFrom:
secretKeyRef:
name: bookstack-db
key: user.pw
optional: false
- name: DB_DATABASE
value: "bookstack"
volumeMounts:
- mountPath: "/config"
name: bookstack
- mountPath: "/config/www/.env"
name: env
subPath: .env
#lifecycle:
# postStart:
# exec:
# command:
# - /bin/sh
# - -c
# - |
# i=0
# until php /app/www/artisan migrate:status >/dev/null 2>&1; do
# i=$((i+1))
# [ "$i" -gt 60 ] && exit 1
# sleep 5
# done
# php /app/www/artisan bookstack:create-admin \
# --initial \
# --email="${ADMIN_EMAIL}" \
# --name="${ADMIN_NAME}" \
# --password="${ADMIN_PASSWORD}" || [ $? -eq 2 ]
volumes:
- name: bookstack
persistentVolumeClaim:
claimName: bookstack
- name: env
configMap:
name: env
defaultMode: 0777
items:
- key: ".env"
path: ".env"
#- name: startup
# configMap:
# name: startup
# defaultMode: 0700
# items:
# - key: "startup.sh"
# path: "startup.sh"
---
apiVersion: v1
kind: Service
metadata:
name: bookstack
namespace: bookstack
labels:
app: bookstack
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
selector:
app: bookstack
sessionAffinity: None
type: ClusterIP

bookstack/db.yaml (new file, 158 lines)

@@ -0,0 +1,158 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: db
namespace: bookstack
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 16Gi
storageClassName: cephfs-hyper
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: db
namespace: bookstack
labels:
app: db
spec:
replicas: 1
selector:
matchLabels:
app: db
template:
metadata:
labels:
app: db
spec:
containers:
- name: db
image: mariadb:10.5
imagePullPolicy: "IfNotPresent"
ports:
- name: mysql
containerPort: 3306
env:
- name: MARIADB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: bookstack-db
key: root.pw
- name: MARIADB_USER
valueFrom:
secretKeyRef:
name: bookstack-db
key: username
optional: false
- name: MARIADB_PASSWORD
valueFrom:
secretKeyRef:
name: bookstack-db
key: user.pw
optional: false
- name: MARIADB_DATABASE
value: bookstack
#livenessProbe:
# exec:
# command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
# initialDelaySeconds: 120
# periodSeconds: 10
# timeoutSeconds: 1
# successThreshold: 1
# failureThreshold: 3
#readinessProbe:
# exec:
# command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
# initialDelaySeconds: 30
# periodSeconds: 10
# timeoutSeconds: 1
# successThreshold: 1
# failureThreshold: 3
volumeMounts:
- mountPath: /var/lib/mysql
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: db
readOnly: false
---
apiVersion: v1
kind: Service
metadata:
name: db
namespace: bookstack
spec:
internalTrafficPolicy: Cluster
ipFamilies:
- IPv6
ipFamilyPolicy: SingleStack
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: 3306
selector:
app: db
sessionAffinity: None
type: ClusterIP
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: db-backup
namespace: bookstack
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
storageClassName: cephfs-hyper
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: db-backup
namespace: bookstack
labels:
app: db-backup
spec:
replicas: 1
selector:
matchLabels:
app: db-backup
template:
metadata:
labels:
app: db-backup
spec:
containers:
- name: db-backup
image: rsprta/mariadb-backup
imagePullPolicy: "IfNotPresent"
env:
- name: CRON_TIMER
value: "@daily"
- name: MARIADB_HOST
value: db
- name: MARIADB_PASSWORD
valueFrom:
secretKeyRef:
name: bookstack-db
key: root.pw
- name: MARIADB_USER
value: root
- name: MARIADB_PORT
value: "3306"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
persistentVolumeClaim:
claimName: db-backup
readOnly: false
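
Restoring a dump is manual. A generic sketch, assuming a plain SQL dump taken from the /backup volume (the exact filenames depend on the rsprta/mariadb-backup image):

kubectl -n bookstack exec -i deploy/db -- sh -c 'exec mysql -uroot -p"$MARIADB_ROOT_PASSWORD" bookstack' < dump.sql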

(filename not shown; new file, 261 lines: GrafanaDashboard for the bookstack namespace)

@@ -0,0 +1,261 @@
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
metadata:
name: bookstack-overview
namespace: grafana
labels:
dashboards: "grafana"
spec:
instanceSelector:
matchLabels:
dashboards: "grafana"
json: |
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"panels": [
{
"type": "stat",
"title": "Deployment Available Replicas",
"gridPos": { "h": 4, "w": 6, "x": 0, "y": 0 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "kube_deployment_status_replicas_available{namespace=\"bookstack\",deployment=\"bookstack\"}",
"refId": "A"
}
],
"options": {
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
"orientation": "auto",
"textMode": "auto",
"colorMode": "value"
}
},
{
"type": "stat",
"title": "Deployment Desired Replicas",
"gridPos": { "h": 4, "w": 6, "x": 6, "y": 0 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "kube_deployment_spec_replicas{namespace=\"bookstack\",deployment=\"bookstack\"}",
"refId": "A"
}
],
"options": {
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
"orientation": "auto",
"textMode": "auto",
"colorMode": "value"
}
},
{
"type": "stat",
"title": "Pod Ready",
"gridPos": { "h": 4, "w": 6, "x": 12, "y": 0 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "max(kube_pod_status_ready{namespace=\"bookstack\",pod=~\"bookstack.*\",condition=\"true\"})",
"refId": "A"
}
],
"options": {
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
"orientation": "auto",
"textMode": "auto",
"colorMode": "value"
}
},
{
"type": "stat",
"title": "Restarts (24h)",
"gridPos": { "h": 4, "w": 6, "x": 18, "y": 0 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "sum(increase(kube_pod_container_status_restarts_total{namespace=\"bookstack\",pod=~\"bookstack.*\"}[24h]))",
"refId": "A"
}
],
"options": {
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
"orientation": "auto",
"textMode": "auto",
"colorMode": "value"
}
},
{
"type": "timeseries",
"title": "CPU Usage",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "sum by (pod) (rate(container_cpu_usage_seconds_total{namespace=\"bookstack\",pod=~\"bookstack.*\",container!=\"\",image!=\"\"}[5m]))",
"legendFormat": "{{pod}}",
"refId": "A"
}
],
"fieldConfig": {
"defaults": {
"unit": "cores"
},
"overrides": []
},
"options": {
"legend": {
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
}
},
{
"type": "timeseries",
"title": "Memory Working Set",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "sum by (pod) (container_memory_working_set_bytes{namespace=\"bookstack\",pod=~\"bookstack.*\",container!=\"\",image!=\"\"})",
"legendFormat": "{{pod}}",
"refId": "A"
}
],
"fieldConfig": {
"defaults": {
"unit": "bytes"
},
"overrides": []
},
"options": {
"legend": {
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
}
},
{
"type": "timeseries",
"title": "PVC Used Bytes",
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 12 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "kubelet_volume_stats_used_bytes{namespace=\"bookstack\",persistentvolumeclaim=\"bookstack\"}",
"legendFormat": "bookstack PVC",
"refId": "A"
}
],
"fieldConfig": {
"defaults": {
"unit": "bytes"
},
"overrides": []
},
"options": {
"legend": {
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
}
},
{
"type": "timeseries",
"title": "PVC Usage Percent",
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 12 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "100 * kubelet_volume_stats_used_bytes{namespace=\"bookstack\",persistentvolumeclaim=\"bookstack\"} / kubelet_volume_stats_capacity_bytes{namespace=\"bookstack\",persistentvolumeclaim=\"bookstack\"}",
"legendFormat": "bookstack PVC",
"refId": "A"
}
],
"fieldConfig": {
"defaults": {
"unit": "percent"
},
"overrides": []
},
"options": {
"legend": {
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
}
},
{
"type": "table",
"title": "Pods in Namespace",
"gridPos": { "h": 8, "w": 24, "x": 0, "y": 20 },
"datasource": { "type": "prometheus", "uid": "${datasource}" },
"targets": [
{
"expr": "kube_pod_info{namespace=\"bookstack\"}",
"format": "table",
"instant": true,
"refId": "A"
}
],
"options": {
"showHeader": true
}
}
],
"refresh": "30s",
"schemaVersion": 39,
"style": "dark",
"tags": ["bookstack", "kubernetes", "namespace"],
"templating": {
"list": [
{
"name": "datasource",
"type": "datasource",
"query": "prometheus",
"refresh": 1,
"label": "Datasource"
}
]
},
"time": {
"from": "now-24h",
"to": "now"
},
"title": "BookStack Namespace Overview",
"uid": "bookstack-namespace-overview",
"version": 1,
"weekStart": ""
}

bookstack/ingress.yaml (new file, 25 lines)

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: bookstack
namespace: bookstack
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts:
- bookstack.apps.undercloud.dev
secretName: bookstack-tls
rules:
- host: bookstack.apps.undercloud.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: bookstack
port:
number: 80

bookstack/namespace.yaml (new file, 6 lines)

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: bookstack
labels:
prometheus: prometheus

bookstack/secrets.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Secret
metadata:
name: bookstack-db
namespace: bookstack
type: Opaque
data:
root.pw: dmVyeXNlY3VyZVBXREJib29rc3RhY2tyb290
username: Ym9va3N0YWNr
user.pw: dmVyeXNlY3VyZVBXREJib29rc3RhY2tib29rc3RhY2s=
---
apiVersion: v1
kind: Secret
metadata:
name: admin
namespace: bookstack
type: Opaque
stringData:
username: admin
email: admin@undercloud.local
data:
pw: NElzVGhlTWluZEtpbGxlcg==
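
The data: values are base64, not encryption; a sketch of round-tripping two of them:

echo -n 'bookstack' | base64                                        # -> Ym9va3N0YWNr (username)
echo 'dmVyeXNlY3VyZVBXREJib29rc3RhY2tib29rc3RhY2s=' | base64 -d     # matches DB_PASSWORD in the bookstack .env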

(filename not shown; new file, 127 lines: Calico BGPConfiguration and BGPPeers)

@@ -0,0 +1,127 @@
---
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
#logSeverityScreen: Info
nodeToNodeMeshEnabled: false
asNumber: 65000
serviceClusterIPs:
- cidr: "2001:470:7116:f:1::/108" #server service net
- cidr: "2001:470:7116:f:2::/108" #dmz service net
#- cidr: "fd00:0:0:f:1::/108" #server service net
#- cidr: "fd00:0:0:f:2::/108" #dmz service net
#- cidr: "fd00:0:0:a::/108" #server service net
- cidr: "10.0.91.0/24" #server service net
- cidr: "10.0.92.0/24" #dmz service net
---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "bgp-router1-v6"
#spec:
# peerIP: "fd00:0:0:2::88" #aux1 - bgp router...
# asNumber: 65000
# nextHopMode: "Self"
# sourceAddress: "UseNodeIP"
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "bgp-router1-v4"
#spec:
# peerIP: "10.0.2.88" #aux2 - bgp router...
# asNumber: 65000
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "bgp-router2-v6"
#spec:
# peerIP: "fd00:0:0:2::89" #aux1 - bgp router...
# asNumber: 65000
# nextHopMode: "Self"
# sourceAddress: "UseNodeIP"
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "bgp-router2-v4"
#spec:
# peerIP: "10.0.2.89" #aux2 - bgp router...
# asNumber: 65000
#---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
name: "coreswitch-v6"
spec:
peerIP: "fd00:0:0:2::3" #aux1 - bgp router...
asNumber: 65000
nextHopMode: "Self"
sourceAddress: "UseNodeIP"
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
name: "coreswitch-v4"
spec:
peerIP: "10.0.2.3" #aux2 - bgp router...
asNumber: 65000
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "firewall-v4"
#spec:
# peerIP: "10.0.2.1" #aux2 - bgp router...
# asNumber: 65000
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "coreswitch-v6"
#spec:
# peerIP: "fd00:0:0:2::3" #aux1 - bgp router...
# asNumber: 65000
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "coreswitch-v4"
#spec:
# peerIP: "10.0.2.3" #aux2 - bgp router...
# asNumber: 65000
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "aux1-v6"
#spec:
# peerIP: "fd00:0:0:2::6" #aux1 - bgp router...
# asNumber: 65000
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "aux2-v6"
#spec:
# peerIP: "fd00:0:0:2::7" #aux2 - bgp router...
# asNumber: 65000
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "aux1-v4"
#spec:
# peerIP: "10.0.2.6" #aux1 - bgp router...
# asNumber: 65000
#---
#apiVersion: projectcalico.org/v3
#kind: BGPPeer
#metadata:
# name: "aux2-v4"
#spec:
# peerIP: "10.0.2.7" #aux2 - bgp router...
# asNumber: 65000
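
With nodeToNodeMeshEnabled off, every node must reach the core switch peers directly. Session state can be inspected per node; a sketch, assuming calicoctl is installed:

calicoctl node status                          # BGP session state towards fd00:0:0:2::3 / 10.0.2.3
calicoctl get bgppeer -o wide
calicoctl get bgpconfiguration default -o yaml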

calico-config/calico.yaml (new file, 51 lines)

@@ -0,0 +1,51 @@
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
typhaMetricsPort: 9093
calicoNetwork:
nodeAddressAutodetectionV4:
interface: eth.*
nodeAddressAutodetectionV6:
cidrs:
- "2001:470:7116:2::/64"
ipPools:
- blockSize: 122
cidr: 2001:470:7116:a::/64
encapsulation: "VXLAN"
natOutgoing: Enabled
nodeSelector: all()
- blockSize: 26
cidr: 10.0.10.0/24
encapsulation: IPIP
natOutgoing: Enabled
nodeSelector: all()
flexVolumePath: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
---
apiVersion: v1
kind: Service
metadata:
name: typha-metrics-svc
namespace: calico-system
spec:
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
#ipFamilyPolicy: SingleStack
type: ClusterIP
selector:
k8s-app: calico-typha
ports:
- port: 9093
targetPort: 9093
name: metrics-port
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}
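
The operator reports rollout progress via TigeraStatus, and the resulting pools are visible once the Installation reconciles; a sketch:

kubectl get tigerastatus
calicoctl get ippool -o wide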

calico-config/crds.yaml (new file, 9337 lines; diff suppressed due to size)

(filename not shown; new file, 47 lines: Calico Installation, APIServer, Goldmane, Whisker)

@@ -0,0 +1,47 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
# Configures Calico networking.
calicoNetwork:
ipPools:
- blockSize: 122
cidr: 2001:470:7116:a::/64
encapsulation: VXLAN
natOutgoing: Disabled
nodeSelector: all()
- blockSize: 26
cidr: 10.0.10.0/24
encapsulation: IPIP
natOutgoing: Enabled
nodeSelector: all()
flexVolumePath: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}
---
# Configures the Calico Goldmane flow aggregator.
apiVersion: operator.tigera.io/v1
kind: Goldmane
metadata:
name: default
---
# Configures the Calico Whisker observability UI.
apiVersion: operator.tigera.io/v1
kind: Whisker
metadata:
name: default

(filename not shown; diff suppressed due to size)

(filename not shown; new file, 38 lines: additional Calico IPPools)

@@ -0,0 +1,38 @@
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
name: ipv6-server-private # server net
spec:
blockSize: 122
cidr: fd00:0:0:a::/64
ipipMode: Never
#natOutgoing: false
#disabled: false
nodeSelector: all()
vxlanMode: Always
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
name: ipv6-dmz-private # dmz net
spec:
blockSize: 122
cidr: fd00:0:0:b::/64
ipipMode: Never
#natOutgoing: false
#disabled: false
nodeSelector: all()
vxlanMode: Always
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
name: ipv6-dmz-public # dmz net
spec:
blockSize: 122
cidr: 2001:470:7116:b::/64
ipipMode: Never
#natOutgoing: false
#disabled: false
nodeSelector: all()
vxlanMode: Always

(filename not shown; new file, 15 lines: calico-system and calico-apiserver namespaces)

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Namespace
metadata:
name: calico-system
labels:
app.kubernetes.io/instance: calico
prometheus: prometheus
---
apiVersion: v1
kind: Namespace
metadata:
name: calico-apiserver
labels:
app.kubernetes.io/instance: calico
prometheus: prometheus

(filename not shown; diff suppressed due to size)

(filename not shown; new file, 590 lines: tigera-operator install manifest)

@@ -0,0 +1,590 @@
apiVersion: v1
kind: Namespace
metadata:
name: tigera-operator
labels:
name: tigera-operator
pod-security.kubernetes.io/enforce: privileged
---
# Source: tigera-operator/templates/tigera-operator/02-serviceaccount-tigera-operator.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: tigera-operator
namespace: tigera-operator
labels:
k8s-app: tigera-operator
imagePullSecrets:
[]
---
# Source: tigera-operator/templates/tigera-operator/02-role-tigera-operator-secrets.yaml
# Permissions required to manipulate operator secrets for a Calico cluster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: tigera-operator-secrets
labels:
k8s-app: tigera-operator
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- update
- delete
---
# Source: tigera-operator/templates/tigera-operator/02-role-tigera-operator.yaml
# Permissions required when running the operator for a Calico cluster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: tigera-operator
labels:
k8s-app: tigera-operator
rules:
# The tigera/operator installs CustomResourceDefinitions necessary for itself
# and Calico more broadly to function.
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- create
# We only allow update access to our own CRDs.
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- apiservers.operator.tigera.io
- gatewayapis.operator.tigera.io
- imagesets.operator.tigera.io
- installations.operator.tigera.io
- tigerastatuses.operator.tigera.io
- bgpconfigurations.crd.projectcalico.org
- bgpfilters.crd.projectcalico.org
- bgppeers.crd.projectcalico.org
- blockaffinities.crd.projectcalico.org
- caliconodestatuses.crd.projectcalico.org
- clusterinformations.crd.projectcalico.org
- felixconfigurations.crd.projectcalico.org
- globalnetworkpolicies.crd.projectcalico.org
- stagedglobalnetworkpolicies.crd.projectcalico.org
- globalnetworksets.crd.projectcalico.org
- hostendpoints.crd.projectcalico.org
- ipamblocks.crd.projectcalico.org
- ipamconfigs.crd.projectcalico.org
- ipamhandles.crd.projectcalico.org
- ippools.crd.projectcalico.org
- ipreservations.crd.projectcalico.org
- kubecontrollersconfigurations.crd.projectcalico.org
- networkpolicies.crd.projectcalico.org
- stagednetworkpolicies.crd.projectcalico.org
- stagedkubernetesnetworkpolicies.crd.projectcalico.org
- networksets.crd.projectcalico.org
- tiers.crd.projectcalico.org
- whiskers.operator.tigera.io
- goldmanes.operator.tigera.io
- managementclusterconnections.operator.tigera.io
# We need update and delete access for ANP/BANP CRDs to set owner refs when assuming control of pre-existing CRDs, for example on OCP.
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
- delete
resourceNames:
- adminnetworkpolicies.policy.networking.k8s.io
- baselineadminnetworkpolicies.policy.networking.k8s.io
- apiGroups:
- ""
resources:
- namespaces
- pods
- podtemplates
- services
- endpoints
- events
- configmaps
- serviceaccounts
verbs:
- create
- get
- list
- update
- delete
- watch
- apiGroups:
- ""
resources:
- resourcequotas
- secrets
verbs:
- list
- get
- watch
- apiGroups:
- ""
resources:
- resourcequotas
verbs:
- create
- get
- list
- update
- delete
- watch
resourceNames:
- calico-critical-pods
- tigera-critical-pods
- apiGroups:
- ""
resources:
- nodes
verbs:
# Need to update node labels when migrating nodes.
- get
- patch
- list
# We need this for Typha autoscaling
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
- clusterrolebindings
- rolebindings
- roles
verbs:
- create
- get
- list
- update
- delete
- watch
- bind
- escalate
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- create
- get
- list
- patch
- update
- delete
- watch
- apiGroups:
- apps
resourceNames:
- tigera-operator
resources:
- deployments/finalizers
verbs:
- update
# The operator needs read and update permissions on the APIs that it controls.
- apiGroups:
- operator.tigera.io
resources:
# Note: any resources used by the operator within an OwnerReference for resources
# it creates requires permissions to <resource>/finalizers.
- apiservers
- apiservers/finalizers
- apiservers/status
- gatewayapis
- gatewayapis/finalizers
- gatewayapis/status
- goldmanes
- goldmanes/finalizers
- goldmanes/status
- imagesets
- installations
- installations/finalizers
- installations/status
- managementclusterconnections
- managementclusterconnections/finalizers
- managementclusterconnections/status
- tigerastatuses
- tigerastatuses/status
- tigerastatuses/finalizers
- whiskers
- whiskers/finalizers
- whiskers/status
verbs:
- get
- list
- update
- patch
- watch
# In addition to the above, the operator creates and deletes TigeraStatus resources.
- apiGroups:
- operator.tigera.io
resources:
- tigerastatuses
verbs:
- create
- delete
# In addition to the above, the operator should have the ability to delete their own resources during uninstallation.
- apiGroups:
- operator.tigera.io
resources:
- installations
- apiservers
- whiskers
- goldmanes
verbs:
- delete
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- create
- update
- delete
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- felixconfigurations
- ippools
verbs:
- create
- patch
- list
- get
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- kubecontrollersconfigurations
- bgpconfigurations
- clusterinformations
verbs:
- get
- list
- watch
- apiGroups:
- projectcalico.org
resources:
- ippools
verbs:
- create
- update
- delete
- patch
- get
- list
- watch
- apiGroups:
- projectcalico.org
resources:
- ipamconfigurations
verbs:
- get
- list
- watch
- apiGroups:
- scheduling.k8s.io
resources:
- priorityclasses
verbs:
- create
- get
- list
- update
- delete
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- get
- list
- update
- delete
- watch
- apiGroups:
- apiregistration.k8s.io
resources:
- apiservices
verbs:
- list
- watch
- create
- update
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- delete
# Needed for operator lock
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- update
- delete
- watch
- apiGroups:
- storage.k8s.io
resources:
- csidrivers
verbs:
- list
- watch
- update
- get
- create
- delete
# Add the permissions to monitor the status of certificate signing requests when certificate management is enabled.
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- list
- watch
# Add the appropriate pod security policy permissions
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- tigera-operator
verbs:
- use
- apiGroups:
- policy
resources:
- podsecuritypolicies
verbs:
- get
- list
- watch
- create
- update
- delete
# For tiered network policy actions, tigera-apiserver requires that we authorize the operator for the tier.networkpolicies and tier.globalnetworkpolicies pseudo-kinds.
- apiGroups:
- projectcalico.org
resourceNames:
- allow-tigera.*
resources:
- tier.networkpolicies
- tier.globalnetworkpolicies
verbs:
- list
- watch
- get
- create
- update
- delete
# For tiered network policy actions, tigera-apiserver requires get authorization on the associated tier.
- apiGroups:
- projectcalico.org
resourceNames:
- allow-tigera
resources:
- tiers
verbs:
- get
- delete
- update
# Separated from the above rule since resourceNames does not support the create verb, and requires a field selector for list/watch verbs.
- apiGroups:
- projectcalico.org
resources:
- tiers
verbs:
- create
- list
- watch
# Additions for Gateway API support.
# 1. The operator needs to reconcile gateway.networking.k8s.io and gateway.envoyproxy.io CRDs.
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- backendlbpolicies.gateway.networking.k8s.io
- backendtlspolicies.gateway.networking.k8s.io
- gatewayclasses.gateway.networking.k8s.io
- gateways.gateway.networking.k8s.io
- grpcroutes.gateway.networking.k8s.io
- httproutes.gateway.networking.k8s.io
- referencegrants.gateway.networking.k8s.io
- tcproutes.gateway.networking.k8s.io
- tlsroutes.gateway.networking.k8s.io
- udproutes.gateway.networking.k8s.io
- backends.gateway.envoyproxy.io
- backendtrafficpolicies.gateway.envoyproxy.io
- clienttrafficpolicies.gateway.envoyproxy.io
- envoyextensionpolicies.gateway.envoyproxy.io
- envoypatchpolicies.gateway.envoyproxy.io
- envoyproxies.gateway.envoyproxy.io
- httproutefilters.gateway.envoyproxy.io
- securitypolicies.gateway.envoyproxy.io
# 2. GatewayClasses and EnvoyProxy configurations.
- apiGroups:
- gateway.networking.k8s.io
resources:
- gatewayclasses
verbs:
- create
- update
- delete
- list
- get
- watch
- apiGroups:
- gateway.envoyproxy.io
resources:
- envoyproxies
verbs:
- create
- update
- delete
- list
- get
- watch
# 3. For Gateway API the operator needs to be able to create and reconcile a certificate
# generation job.
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- update
resourceNames:
- tigera-gateway-api-gateway-helm-certgen
---
# Source: tigera-operator/templates/tigera-operator/02-rolebinding-tigera-operator.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: tigera-operator
labels:
k8s-app: tigera-operator
subjects:
- kind: ServiceAccount
name: tigera-operator
namespace: tigera-operator
roleRef:
kind: ClusterRole
name: tigera-operator
apiGroup: rbac.authorization.k8s.io
---
# Source: tigera-operator/templates/tigera-operator/02-rolebinding-tigera-operator-secrets.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: tigera-operator-secrets
namespace: tigera-operator
labels:
k8s-app: tigera-operator
subjects:
- kind: ServiceAccount
name: tigera-operator
namespace: tigera-operator
roleRef:
kind: ClusterRole
name: tigera-operator-secrets
apiGroup: rbac.authorization.k8s.io
---
# Source: tigera-operator/templates/tigera-operator/02-tigera-operator.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: tigera-operator
namespace: tigera-operator
labels:
k8s-app: tigera-operator
spec:
replicas: 1
selector:
matchLabels:
name: tigera-operator
template:
metadata:
labels:
name: tigera-operator
k8s-app: tigera-operator
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
serviceAccountName: tigera-operator
# Set the termination grace period to match how long the operator will wait for
# resources to terminate when being uninstalled.
terminationGracePeriodSeconds: 60
hostNetwork: true
# This must be set when hostNetwork is true or else the cluster services won't resolve
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: tigera-operator
image: quay.io/tigera/operator:v1.38.3
imagePullPolicy: IfNotPresent
command:
- operator
args:
# Configure tigera-operator to manage installation of the necessary CRDs.
- -manage-crds=true
volumeMounts:
- name: var-lib-calico
readOnly: true
mountPath: /var/lib/calico
env:
- name: WATCH_NAMESPACE
value: ""
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "tigera-operator"
- name: TIGERA_OPERATOR_INIT_IMAGE_VERSION
value: v1.38.3
envFrom:
- configMapRef:
name: kubernetes-services-endpoint
optional: true
volumes:
- name: var-lib-calico
hostPath:
path: /var/lib/calico


@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
name: whisker
namespace: calico-system
spec:
type: ClusterIP
clusterIP: 2001:470:7116:f:1::82
clusterIPs:
- 2001:470:7116:f:1::82
- 10.0.91.82
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: RequireDualStack
ports:
- name: http
port: 8081
targetPort: 8081
selector:
app.kubernetes.io/name: whisker

BIN
ceph/.DS_Store vendored Normal file

Binary file not shown.

27
ceph/ceph-conf.yaml Normal file

@@ -0,0 +1,27 @@
---
# This is a sample configmap that helps define a Ceph configuration as required
# by the CSI plugins.
# Sample ceph.conf available at
# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
# documentation is available at
# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
apiVersion: v1
kind: ConfigMap
data:
ceph.conf: |
[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# enable ceph librbd,librados logs in rbd/cephfs/nfs container logs
# log_to_stderr = true
# debug_rbd = 30 # enable debug rbd logs
# debug_rados = 30 # enable debug rados logs
# debug_rbd_mirror = 30 # enable debugging logs for rbd mirroring daemon
# keyring is a required key and its value should be empty
keyring: |
metadata:
name: ceph-config
namespace: ceph


@@ -0,0 +1,192 @@
---
kind: Service
apiVersion: v1
metadata:
name: csi-cephfsplugin-provisioner
namespace: ceph
labels:
app: csi-metrics
spec:
selector:
app: csi-cephfsplugin-provisioner
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8681
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-cephfsplugin-provisioner
namespace: ceph
spec:
selector:
matchLabels:
app: csi-cephfsplugin-provisioner
replicas: 3
template:
metadata:
labels:
app: csi-cephfsplugin-provisioner
spec:
#affinity:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - csi-cephfsplugin-provisioner
# topologyKey: "kubernetes.io/hostname"
serviceAccountName: cephfs-csi-provisioner
priorityClassName: system-cluster-critical
containers:
- name: csi-cephfsplugin
image: quay.io/cephcsi/cephcsi:v3.12.0
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--controllerserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=cephfs.csi.ceph.com"
- "--pidlimit=-1"
- "--enableprofiling=false"
- "--setmetadata=true"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# - name: KMS_CONFIGMAP_NAME
# value: encryptionConfig
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: host-sys
mountPath: /sys
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-csi-encryption-kms-config
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--leader-election=true"
- "--retry-interval-start=500ms"
- "--feature-gates=HonorPVReclaimPolicy=true"
- "--prevent-volume-mode-conversion=true"
- "--extra-create-metadata=true"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--leader-election"
- "--retry-interval-start=500ms"
- "--handle-volume-inuse-error=false"
- "--feature-gates=RecoverVolumeExpansionFailure=true"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--leader-election=true"
- "--extra-create-metadata=true"
- "--enable-volume-group-snapshots=false"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: liveness-prometheus
image: quay.io/cephcsi/cephcsi:v3.12.0
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8681"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: ceph-config
configMap:
name: ceph-config
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
- name: ceph-csi-encryption-kms-config
configMap:
name: ceph-csi-encryption-kms-config

212
ceph/csi-cephfsplugin.yaml Normal file

@@ -0,0 +1,212 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-cephfsplugin
namespace: ceph
spec:
selector:
matchLabels:
app: csi-cephfsplugin
template:
metadata:
labels:
app: csi-cephfsplugin
spec:
serviceAccountName: cephfs-csi-nodeplugin
priorityClassName: system-node-critical
hostNetwork: true
hostPID: true
      # To use e.g. a Rook-orchestrated cluster, where the mons' FQDNs are
      # resolved through a k8s service, set the DNS policy to cluster-first
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: csi-cephfsplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/cephcsi/cephcsi:v3.12.0
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--nodeserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=cephfs.csi.ceph.com"
- "--enableprofiling=false"
# If topology based provisioning is desired, configure required
# node labels representing the nodes topology domain
# and pass the label names below, for CSI to consume and advertise
# its equivalent topology domain
# - "--domainlabels=failure-domain/region,failure-domain/zone"
#
# Options to enable read affinity.
# If enabled Ceph CSI will fetch labels from kubernetes node and
# pass `read_from_replica=localize,crush_location=type:value` during
# CephFS mount command. refer:
# https://docs.ceph.com/en/latest/man/8/rbd/#kernel-rbd-krbd-options
# for more details.
# - "--enable-read-affinity=true"
# - "--crush-location-labels=topology.io/zone,topology.io/rack"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# - name: KMS_CONFIGMAP_NAME
# value: encryptionConfig
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: mountpoint-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: Bidirectional
- name: plugin-dir
mountPath: /var/lib/kubelet/plugins
mountPropagation: "Bidirectional"
- name: host-sys
mountPath: /sys
- name: etc-selinux
mountPath: /etc/selinux
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
- name: host-mount
mountPath: /run/mount
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-csi-mountinfo
mountPath: /csi/mountinfo
- name: ceph-csi-encryption-kms-config
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: driver-registrar
        # This is necessary only for systems with SELinux, where
        # non-privileged sidecar containers cannot access the Unix domain
        # socket created by the privileged CSI driver container.
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1
args:
- "--v=1"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-prometheus
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: quay.io/cephcsi/cephcsi:v3.12.0
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8681"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins
type: Directory
- name: host-sys
hostPath:
path: /sys
- name: etc-selinux
hostPath:
path: /etc/selinux
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: host-mount
hostPath:
path: /run/mount
- name: ceph-config
configMap:
name: ceph-config
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
- name: ceph-csi-mountinfo
hostPath:
path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/mountinfo
type: DirectoryOrCreate
- name: ceph-csi-encryption-kms-config
configMap:
name: ceph-csi-encryption-kms-config
---
# This is a service to expose the liveness metrics
apiVersion: v1
kind: Service
metadata:
name: csi-metrics-cephfsplugin
namespace: ceph
labels:
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8681
selector:
app: csi-cephfsplugin

27
ceph/csi-config-map.yaml Normal file

@@ -0,0 +1,27 @@
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-csi-config
namespace: ceph
data:
config.json: |-
[
{
"clusterID": "de115bee-2527-45a0-b0e8-50c30be4a907",
"monitors": [
"[fd00:0:0:2::61]:6789",
"[fd00:0:0:2::62]:6789",
"[fd00:0:0:2::63]:6789"
],
"cephFS": {
"subvolumeGroup": "csi"
}
}
]


@@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-csi-encryption-kms-config
namespace: ceph
data:
config.json: "{}"


@@ -0,0 +1,41 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cephfs-csi-nodeplugin
namespace: ceph
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: cephfs-csi-nodeplugin
# replace with non-default namespace name
namespace: ceph
roleRef:
kind: ClusterRole
name: cephfs-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,122 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cephfs-csi-provisioner
namespace: ceph
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update", "patch", "create"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["get", "list", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list", "watch", "update", "patch", "create"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotcontents"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotcontents/status"]
verbs: ["update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: cephfs-csi-provisioner
namespace: ceph
roleRef:
kind: ClusterRole
name: cephfs-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# replace with non-default namespace name
namespace: ceph
name: cephfs-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role-cfg
# replace with non-default namespace name
namespace: ceph
subjects:
- kind: ServiceAccount
name: cephfs-csi-provisioner
# replace with non-default namespace name
namespace: ceph
roleRef:
kind: Role
name: cephfs-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io

17
ceph/csidriver.yaml Normal file

@@ -0,0 +1,17 @@
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: "cephfs.csi.ceph.com"
spec:
attachRequired: false
podInfoOnMount: false
fsGroupPolicy: File
seLinuxMount: true

4
ceph/namespace.yaml Normal file

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: ceph

24
ceph/secrets.yaml Normal file

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Secret
metadata:
name: cephfs-provisioner-secret
namespace: ceph
type: Opaque
stringData:
adminID: k8s-csi
adminKey: "AQAcc59oml/CHRAA9xzJ6z3wkaz+0lz6Awqofg=="
#userID: k8s-csi
#userKey: "AQAcc59oml/CHRAA9xzJ6z3wkaz+0lz6Awqofg=="
---
apiVersion: v1
kind: Secret
metadata:
name: cephfs-node-secret
namespace: ceph
type: Opaque
stringData:
userID: k8s-csi
userKey: "AQAcc59oml/CHRAA9xzJ6z3wkaz+0lz6Awqofg=="
adminID: k8s-csi
adminKey: "AQAcc59oml/CHRAA9xzJ6z3wkaz+0lz6Awqofg=="

33
ceph/storage-class.yaml Normal file

@@ -0,0 +1,33 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: cephfs-hyper
provisioner: cephfs.csi.ceph.com
parameters:
  # Ceph cluster identifier (FSID)
  clusterID: "de115bee-2527-45a0-b0e8-50c30be4a907"
  # List of MON addresses (IPv4, IPv6, or both)
  # Format: "ip1:port,ip2:port,[v6-addr]:port"
  monitors: "[fd00:0:0:2::61]:6789,[fd00:0:0:2::62]:6789,[fd00:0:0:2::63]:6789"
  # CephFS filesystem name and subvolume group
  fsName: "cephfs"
  subvolumeGroup: "csi"
  # Secrets used by the driver
  csi.storage.k8s.io/provisioner-secret-name: cephfs-provisioner-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph
  csi.storage.k8s.io/controller-expand-secret-name: cephfs-provisioner-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: ceph
  csi.storage.k8s.io/node-stage-secret-name: cephfs-node-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph
  # (optional) mounter: kernel|fuse
  # mounter: kernel
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
- rw
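---
# Hedged usage sketch (not part of the original manifests): a PVC that
# provisions a CephFS volume through the cephfs-hyper StorageClass above.
# The claim name, namespace, and size are placeholder values.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-cephfs-pvc        # placeholder name
  namespace: demo              # placeholder namespace
spec:
  accessModes:
    - ReadWriteMany            # CephFS supports shared read-write access
  resources:
    requests:
      storage: 1Gi             # placeholder size
  storageClassName: cephfs-hyper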

BIN
cert-manager/.DS_Store vendored Normal file

Binary file not shown.

39
cert-manager/README.md Normal file

@@ -0,0 +1,39 @@
# cert-manager
## Creates (Let's Encrypt) certificates automatically
### when annotated in an Ingress definition (see the sketch below)
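A minimal sketch of that flow, hedged: the Ingress name, host, and backend service below are placeholders, while the `letsencrypt` ClusterIssuer is the one defined in `issuers.yaml` in this directory.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: demo-app                        # placeholder name
  annotations:
    # ingress-shim sees this annotation and creates a Certificate
    # for the hosts listed under spec.tls
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - demo.apps.undercloud.dev      # placeholder host
      secretName: demo-app-tls          # the issued certificate lands here
  rules:
    - host: demo.apps.undercloud.dev
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: demo-app          # placeholder backend service
                port:
                  number: 80
```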
# trust-manager
trust-manager is the easiest way to manage trust bundles in Kubernetes and OpenShift clusters.
It orchestrates bundles of trusted X.509 certificates, which are primarily used for validating certificates during a TLS handshake but can be used in other situations, too.

### Important
cert-manager is required, and trust-manager is installed into the cert-manager namespace!
## Overview
trust-manager is a small Kubernetes operator which aims to reduce the overhead of managing TLS trust bundles in your clusters.
It adds the Bundle custom Kubernetes resource (CRD), which can read input from various sources and combine the resulting certificates into a bundle ready to be used by your applications.
trust-manager ensures that it's both quick and easy to keep your trusted certificates up to date, and it enables cluster administrators to automate providing a secure bundle without having to rebuild containers to update trust stores.
It's designed to complement cert-manager and works well when consuming CA certificates from a cert-manager Issuer or ClusterIssuer, but it can be used entirely independently of cert-manager if needed.
## Usage
trust-manager is intentionally simple and adds just one new Kubernetes CustomResourceDefinition: Bundle.
A Bundle represents a set of PEM-encoded X.509 certificates that should be distributed and made available across the cluster. Bundles are cluster-scoped.
Users specify a list of sources, which trust-manager queries and concatenates certificate data from. The only other required field is the target, which describes how and where the resulting bundle will be written; a consumer sketch follows below.
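A hedged consumer sketch: assuming trust-manager's default behaviour of writing the bundle to a ConfigMap named after the Bundle (here `undercloud-ca-bundle` with key `trust-bundle.pem`, as configured in `bundles.yaml`), a workload in a labelled namespace can mount it like this. The pod name and image are placeholders.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: bundle-consumer                # placeholder name
spec:
  containers:
    - name: app
      image: alpine:3.20               # placeholder image
      command: ["sleep", "infinity"]
      volumeMounts:
        - name: ca-bundle
          mountPath: /etc/ssl/certs/trust-bundle.pem
          subPath: trust-bundle.pem
          readOnly: true
  volumes:
    - name: ca-bundle
      configMap:
        name: undercloud-ca-bundle     # ConfigMap written by trust-manager
```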
Possible improvements:
- metrics
- liveness probes
- resource limits

16
cert-manager/bundles.yaml Normal file

@@ -0,0 +1,16 @@
apiVersion: trust.cert-manager.io/v1alpha1
kind: Bundle
metadata:
name: undercloud-ca-bundle
spec:
sources:
- secret:
name: "root-secret"
key: "tls.crt"
- useDefaultCAs: true
target:
configMap:
key: "trust-bundle.pem"
namespaceSelector:
matchLabels:
undercloud.dev/cert: "ca"


@@ -0,0 +1,412 @@
# Copyright 2022 The cert-manager Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Source: cert-manager/templates/webhook-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cert-manager-webhook
namespace: cert-manager
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
data:
---
# Source: cert-manager/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cert-manager
namespace: cert-manager
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
spec:
type: ClusterIP
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
ports:
- protocol: TCP
port: 9402
name: tcp-prometheus-servicemonitor
targetPort: 9402
selector:
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
---
# Source: cert-manager/templates/webhook-service.yaml
apiVersion: v1
kind: Service
metadata:
name: cert-manager-webhook
namespace: cert-manager
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
spec:
type: ClusterIP
ipFamilies:
- IPv6
- IPv4
ipFamilyPolicy: PreferDualStack
ports:
- name: https
port: 443
protocol: TCP
targetPort: "https"
selector:
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
---
# Source: cert-manager/templates/cainjector-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cert-manager-cainjector
namespace: cert-manager
labels:
app: cainjector
app.kubernetes.io/name: cainjector
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cainjector"
app.kubernetes.io/version: "v1.11.0"
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: cainjector
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cainjector"
template:
metadata:
labels:
app: cainjector
app.kubernetes.io/name: cainjector
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cainjector"
app.kubernetes.io/version: "v1.11.0"
spec:
dnsConfig:
options:
- name: ndots
value: "1"
serviceAccountName: cert-manager-cainjector
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- name: cert-manager-cainjector
image: "quay.io/jetstack/cert-manager-cainjector:v1.11.0"
imagePullPolicy: IfNotPresent
args:
- --v=2
- --leader-election-namespace=kube-system
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
nodeSelector:
kubernetes.io/os: linux
---
# Source: cert-manager/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cert-manager
namespace: cert-manager
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
template:
metadata:
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
annotations:
prometheus.io/path: "/metrics"
prometheus.io/scrape: 'true'
prometheus.io/port: '9402'
spec:
dnsConfig:
options:
- name: ndots
value: "1"
hostAliases:
- ip: "2001:470:7116:f:1::b492"
hostnames:
- "ldap.apps.undercloud.dev"
- "mail.apps.undercloud.dev"
- "smtp.apps.undercloud.dev"
- "imap.apps.undercloud.dev"
- "autoconfig.apps.undercloud.dev"
- "autodiscover.apps.undercloud.dev"
- ip: "10.0.91.211"
hostnames:
- "ldap.apps.undercloud.dev"
- "mail.apps.undercloud.dev"
- "smtp.apps.undercloud.dev"
- "imap.apps.undercloud.dev"
- "autoconfig.apps.undercloud.dev"
- "autodiscover.apps.undercloud.dev"
serviceAccountName: cert-manager
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- name: cert-manager-controller
image: "quay.io/jetstack/cert-manager-controller:v1.11.0"
imagePullPolicy: IfNotPresent
args:
- --v=2
- --cluster-resource-namespace=$(POD_NAMESPACE)
- --leader-election-namespace=kube-system
- --acme-http01-solver-image=quay.io/jetstack/cert-manager-acmesolver:v1.11.0
- --max-concurrent-challenges=60
ports:
- containerPort: 9402
name: http-metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
nodeSelector:
kubernetes.io/os: linux
---
# Source: cert-manager/templates/webhook-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cert-manager-webhook
namespace: cert-manager
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
template:
metadata:
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
spec:
dnsConfig:
options:
- name: ndots
value: "1"
serviceAccountName: cert-manager-webhook
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- name: cert-manager-webhook
image: "quay.io/jetstack/cert-manager-webhook:v1.11.0"
imagePullPolicy: IfNotPresent
args:
- --v=2
- --secure-port=10250
- --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE)
- --dynamic-serving-ca-secret-name=cert-manager-webhook-ca
- --dynamic-serving-dns-names=cert-manager-webhook
- --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE)
- --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc
ports:
- name: https
protocol: TCP
containerPort: 10250
- name: healthcheck
protocol: TCP
containerPort: 6080
livenessProbe:
httpGet:
path: /livez
port: 6080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /healthz
port: 6080
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
nodeSelector:
kubernetes.io/os: linux
---
# Source: cert-manager/templates/webhook-mutating-webhook.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: cert-manager-webhook
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
annotations:
cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca"
webhooks:
- name: webhook.cert-manager.io
rules:
- apiGroups:
- "cert-manager.io"
- "acme.cert-manager.io"
apiVersions:
- "v1"
operations:
- CREATE
- UPDATE
resources:
- "*/*"
admissionReviewVersions: ["v1"]
# This webhook only accepts v1 cert-manager resources.
# Equivalent matchPolicy ensures that non-v1 resource requests are sent to
# this webhook (after the resources have been converted to v1).
matchPolicy: Equivalent
timeoutSeconds: 10
failurePolicy: Fail
# Only include 'sideEffects' field in Kubernetes 1.12+
sideEffects: None
clientConfig:
service:
name: cert-manager-webhook
namespace: cert-manager
path: /mutate
---
# Source: cert-manager/templates/webhook-validating-webhook.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: cert-manager-webhook
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
annotations:
cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca"
webhooks:
- name: webhook.cert-manager.io
namespaceSelector:
matchExpressions:
- key: "cert-manager.io/disable-validation"
operator: "NotIn"
values:
- "true"
- key: "name"
operator: "NotIn"
values:
- cert-manager
rules:
- apiGroups:
- "cert-manager.io"
- "acme.cert-manager.io"
apiVersions:
- "v1"
operations:
- CREATE
- UPDATE
resources:
- "*/*"
admissionReviewVersions: ["v1"]
# This webhook only accepts v1 cert-manager resources.
# Equivalent matchPolicy ensures that non-v1 resource requests are sent to
# this webhook (after the resources have been converted to v1).
matchPolicy: Equivalent
timeoutSeconds: 10
failurePolicy: Fail
sideEffects: None
clientConfig:
service:
name: cert-manager-webhook
namespace: cert-manager
path: /validate

4409
cert-manager/crds.yaml Normal file

File diff suppressed because it is too large

72
cert-manager/issuers.yaml Normal file

@@ -0,0 +1,72 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: selfsigned
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: selfsigned-ca
spec:
subject:
organizations:
- undercloud
isCA: true
commonName: selfsigned-ca
secretName: root-secret
privateKey:
algorithm: ECDSA
size: 256
issuerRef:
name: selfsigned
kind: ClusterIssuer
group: cert-manager.io
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: ca
spec:
ca:
secretName: root-secret
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
email: thrawn235@gmail.com
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: letsencrypt-staging
# Add a single challenge solver, HTTP01 using nginx
solvers:
- http01:
ingress:
class: nginx
serviceType: ClusterIP
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
# You must replace this email address with your own.
# Let's Encrypt will use this to contact you about expiring
# certificates, and issues related to your account.
email: thrawn235@gmail.com
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource that will be used to store the account's private key.
name: letsencrypt
# Add a single challenge solver, HTTP01 using nginx
solvers:
- http01:
ingress:
class: nginx
serviceType: ClusterIP
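---
# Hedged usage sketch (not part of the original file): a Certificate that
# requests a certificate directly from the letsencrypt ClusterIssuer above,
# instead of relying on the ingress-shim annotation. Name, namespace, and
# DNS name are placeholder values.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: demo-cert              # placeholder name
  namespace: demo              # placeholder namespace
spec:
  secretName: demo-cert-tls    # cert-manager stores the signed cert and key here
  dnsNames:
    - demo.apps.undercloud.dev # placeholder host
  issuerRef:
    name: letsencrypt
    kind: ClusterIssuer
    group: cert-manager.io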


@@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
prometheus: prometheus
---

704
cert-manager/rbac.yaml Normal file

@@ -0,0 +1,704 @@
# Source: cert-manager/templates/cainjector-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-cainjector
labels:
app: cainjector
app.kubernetes.io/name: cainjector
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cainjector"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "create", "update", "patch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["apiregistration.k8s.io"]
resources: ["apiservices"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch", "update"]
---
# Source: cert-manager/templates/rbac.yaml
# Issuer controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-controller-issuers
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["cert-manager.io"]
resources: ["issuers", "issuers/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Source: cert-manager/templates/rbac.yaml
# ClusterIssuer controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-controller-clusterissuers
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers", "clusterissuers/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Source: cert-manager/templates/rbac.yaml
# Certificates controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-controller-certificates
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"]
verbs: ["get", "list", "watch"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["cert-manager.io"]
resources: ["certificates/finalizers", "certificaterequests/finalizers"]
verbs: ["update"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders"]
verbs: ["create", "delete", "get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Source: cert-manager/templates/rbac.yaml
# Orders controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-controller-orders
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders", "orders/status"]
verbs: ["update", "patch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders", "challenges"]
verbs: ["get", "list", "watch"]
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers", "issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges"]
verbs: ["create", "delete"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders/finalizers"]
verbs: ["update"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Source: cert-manager/templates/rbac.yaml
# Challenges controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-controller-challenges
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rules:
# Use to update challenge resource status
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "challenges/status"]
verbs: ["update", "patch"]
# Used to watch challenge resources
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges"]
verbs: ["get", "list", "watch"]
# Used to watch challenges, issuer and clusterissuer resources
- apiGroups: ["cert-manager.io"]
resources: ["issuers", "clusterissuers"]
verbs: ["get", "list", "watch"]
# Need to be able to retrieve ACME account private key to complete challenges
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
# Used to create events
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
# HTTP01 rules
- apiGroups: [""]
resources: ["pods", "services"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "list", "watch", "create", "delete", "update"]
- apiGroups: [ "gateway.networking.k8s.io" ]
resources: [ "httproutes" ]
verbs: ["get", "list", "watch", "create", "delete", "update"]
# We require the ability to specify a custom hostname when we are creating
# new ingress resources.
# See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148
- apiGroups: ["route.openshift.io"]
resources: ["routes/custom-host"]
verbs: ["create"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges/finalizers"]
verbs: ["update"]
# DNS01 rules (duplicated above)
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
---
# Source: cert-manager/templates/rbac.yaml
# ingress-shim controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-controller-ingress-shim
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests"]
verbs: ["create", "update", "delete"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "list", "watch"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses/finalizers"]
verbs: ["update"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["gateways", "httproutes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["gateways/finalizers", "httproutes/finalizers"]
verbs: ["update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-view
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "orders"]
verbs: ["get", "list", "watch"]
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-edit
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates/status"]
verbs: ["update"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "orders"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
---
# Source: cert-manager/templates/rbac.yaml
# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-controller-approve:cert-manager-io
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cert-manager"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["cert-manager.io"]
resources: ["signers"]
verbs: ["approve"]
resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
---
# Source: cert-manager/templates/rbac.yaml
# Permission to:
# - Update and sign CertificateSigningRequests referencing cert-manager.io Issuers and ClusterIssuers
# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-controller-certificatesigningrequests
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cert-manager"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests/status"]
verbs: ["update", "patch"]
- apiGroups: ["certificates.k8s.io"]
resources: ["signers"]
resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
verbs: ["sign"]
- apiGroups: ["authorization.k8s.io"]
resources: ["subjectaccessreviews"]
verbs: ["create"]
---
# Source: cert-manager/templates/webhook-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cert-manager-webhook:subjectaccessreviews
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["authorization.k8s.io"]
resources: ["subjectaccessreviews"]
verbs: ["create"]
---
# Source: cert-manager/templates/cainjector-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-cainjector
labels:
app: cainjector
app.kubernetes.io/name: cainjector
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cainjector"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-cainjector
subjects:
- name: cert-manager-cainjector
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-controller-issuers
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-controller-issuers
subjects:
- name: cert-manager
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-controller-clusterissuers
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-controller-clusterissuers
subjects:
- name: cert-manager
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-controller-certificates
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-controller-certificates
subjects:
- name: cert-manager
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-controller-orders
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-controller-orders
subjects:
- name: cert-manager
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-controller-challenges
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-controller-challenges
subjects:
- name: cert-manager
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-controller-ingress-shim
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-controller-ingress-shim
subjects:
- name: cert-manager
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-controller-approve:cert-manager-io
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cert-manager"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-controller-approve:cert-manager-io
subjects:
- name: cert-manager
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-controller-certificatesigningrequests
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cert-manager"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-controller-certificatesigningrequests
subjects:
- name: cert-manager
namespace: cert-manager
kind: ServiceAccount
---
# Source: cert-manager/templates/webhook-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cert-manager-webhook:subjectaccessreviews
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cert-manager-webhook:subjectaccessreviews
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook
namespace: cert-manager
---
# Source: cert-manager/templates/cainjector-rbac.yaml
# leader election rules
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-cainjector:leaderelection
namespace: kube-system
labels:
app: cainjector
app.kubernetes.io/name: cainjector
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cainjector"
app.kubernetes.io/version: "v1.11.0"
rules:
# Used for leader election by the controller
# cert-manager-cainjector-leader-election is used by the CertificateBased injector controller
# see cmd/cainjector/start.go#L113
# cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller
# see cmd/cainjector/start.go#L137
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"]
verbs: ["get", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
---
# Source: cert-manager/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager:leaderelection
namespace: kube-system
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["cert-manager-controller"]
verbs: ["get", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
---
# Source: cert-manager/templates/webhook-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cert-manager-webhook:dynamic-serving
namespace: cert-manager
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames:
- 'cert-manager-webhook-ca'
verbs: ["get", "list", "watch", "update"]
# It's not possible to grant CREATE permission on a single resourceName.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
---
# Source: cert-manager/templates/cainjector-rbac.yaml
# grant cert-manager permission to manage the leaderelection configmap in the
# leader election namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-cainjector:leaderelection
namespace: kube-system
labels:
app: cainjector
app.kubernetes.io/name: cainjector
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cainjector"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager-cainjector:leaderelection
subjects:
- kind: ServiceAccount
name: cert-manager-cainjector
namespace: cert-manager
---
# Source: cert-manager/templates/rbac.yaml
# grant cert-manager permission to manage the leaderelection configmap in the
# leader election namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager:leaderelection
namespace: kube-system
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager:leaderelection
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager
namespace: cert-manager
---
# Source: cert-manager/templates/webhook-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cert-manager-webhook:dynamic-serving
namespace: cert-manager
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cert-manager-webhook:dynamic-serving
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cert-manager-webhook
namespace: cert-manager
---


@@ -0,0 +1,37 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: cert-manager
namespace: cert-manager
labels:
team: undercloud
spec:
#namespaceSelector:
# matchNames:
# - argocd-metrics
selector:
matchLabels:
app: cert-manager
endpoints:
- port: tcp-prometheus-servicemonitor
#path: /metrics
interval: 5s
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: trust-manager
namespace: cert-manager
labels:
team: undercloud
spec:
#namespaceSelector:
# matchNames:
# - argocd-metrics
selector:
matchLabels:
app: trust-manager
endpoints:
- port: metrics
#path: /metrics
interval: 5s


@@ -0,0 +1,42 @@
# Source: cert-manager/templates/cainjector-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: cert-manager-cainjector
namespace: cert-manager
labels:
app: cainjector
app.kubernetes.io/name: cainjector
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "cainjector"
app.kubernetes.io/version: "v1.11.0"
---
# Source: cert-manager/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: cert-manager
namespace: cert-manager
labels:
app: cert-manager
app.kubernetes.io/name: cert-manager
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "controller"
app.kubernetes.io/version: "v1.11.0"
---
# Source: cert-manager/templates/webhook-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: cert-manager-webhook
namespace: cert-manager
labels:
app: webhook
app.kubernetes.io/name: webhook
app.kubernetes.io/instance: cert-manager
app.kubernetes.io/component: "webhook"
app.kubernetes.io/version: "v1.11.0"
---


@@ -0,0 +1,533 @@
---
# Source: trust-manager/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: trust-manager
labels:
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
---
# Source: trust-manager/templates/trust.cert-manager.io_bundles.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.8.0
creationTimestamp: null
name: bundles.trust.cert-manager.io
spec:
group: trust.cert-manager.io
names:
kind: Bundle
listKind: BundleList
plural: bundles
singular: bundle
scope: Cluster
versions:
- additionalPrinterColumns:
- description: Bundle Target Key
jsonPath: .status.target.configMap.key
name: Target
type: string
- description: Bundle has been synced
jsonPath: .status.conditions[?(@.type == "Synced")].status
name: Synced
type: string
- description: Reason Bundle has Synced status
jsonPath: .status.conditions[?(@.type == "Synced")].reason
name: Reason
type: string
- description: Timestamp Bundle was created
jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
type: object
required:
- spec
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Desired state of the Bundle resource.
type: object
required:
- sources
- target
properties:
sources:
description: Sources is a set of references to data whose data will sync to the target.
type: array
items:
description: BundleSource is the set of sources whose data will be appended and synced to the BundleTarget in all Namespaces.
type: object
properties:
configMap:
description: ConfigMap is a reference to a ConfigMap's `data` key, in the trust Namespace.
type: object
required:
- key
- name
properties:
key:
description: Key is the key of the entry in the object's `data` field to be used.
type: string
name:
description: Name is the name of the source object in the trust Namespace.
type: string
inLine:
description: InLine is a simple string to append as the source data.
type: string
secret:
description: Secret is a reference to a Secret's `data` key, in the trust Namespace.
type: object
required:
- key
- name
properties:
key:
description: Key is the key of the entry in the object's `data` field to be used.
type: string
name:
description: Name is the name of the source object in the trust Namespace.
type: string
useDefaultCAs:
description: UseDefaultCAs, when true, requests the default CA bundle to be used as a source. Default CAs are available if trust-manager was installed via Helm or was otherwise set up to include a package-injecting init container by using the "--default-package-location" flag when starting the trust-manager controller. If default CAs were not configured at start-up, any request to use the default CAs will fail. The version of the default CA package which is used for a Bundle is stored in the defaultCAPackageVersion field of the Bundle's status field.
type: boolean
target:
description: Target is the target location in all namespaces to sync source data to.
type: object
properties:
configMap:
description: ConfigMap is the target ConfigMap in Namespaces that all Bundle source data will be synced to.
type: object
required:
- key
properties:
key:
description: Key is the key of the entry in the object's `data` field to be used.
type: string
namespaceSelector:
description: NamespaceSelector will, if set, only sync the target resource in Namespaces which match the selector.
type: object
properties:
matchLabels:
description: MatchLabels matches on the set of labels that must be present on a Namespace for the Bundle target to be synced there.
type: object
additionalProperties:
type: string
status:
description: Status of the Bundle. This is set and managed automatically.
type: object
properties:
conditions:
description: List of status conditions to indicate the status of the Bundle. Known condition types are `Bundle`.
type: array
items:
description: BundleCondition contains condition information for a Bundle.
type: object
required:
- status
- type
properties:
lastTransitionTime:
description: LastTransitionTime is the timestamp corresponding to the last status change of this condition.
type: string
format: date-time
message:
description: Message is a human readable description of the details of the last transition, complementing reason.
type: string
observedGeneration:
description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Bundle.
type: integer
format: int64
reason:
description: Reason is a brief machine readable explanation for the condition's last transition.
type: string
status:
description: Status of the condition, one of ('True', 'False', 'Unknown').
type: string
type:
description: Type of the condition, known values are (`Synced`).
type: string
defaultCAVersion:
description: DefaultCAPackageVersion, if set and non-empty, indicates the version information which was retrieved when the set of default CAs was requested in the bundle source. This should only be set if useDefaultCAs was set to "true" on a source, and will be the same for the same version of a bundle with identical certificates.
type: string
target:
description: Target is the current Target that the Bundle is attempting or has completed syncing the source data to.
type: object
properties:
configMap:
description: ConfigMap is the target ConfigMap in Namespaces that all Bundle source data will be synced to.
type: object
required:
- key
properties:
key:
description: Key is the key of the entry in the object's `data` field to be used.
type: string
namespaceSelector:
description: NamespaceSelector will, if set, only sync the target resource in Namespaces which match the selector.
type: object
properties:
matchLabels:
description: MatchLabels matches on the set of labels that must be present on a Namespace for the Bundle target to be synced there.
type: object
additionalProperties:
type: string
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: trust-manager/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
name: trust-manager
rules:
- apiGroups:
- "trust.cert-manager.io"
resources:
- "bundles"
verbs: ["get", "list", "watch"]
# Permissions to update finalizers are required for trust-manager to work correctly
# on OpenShift, even though we don't directly use finalizers at the time of writing
- apiGroups:
- "trust.cert-manager.io"
resources:
- "bundles/finalizers"
verbs: ["update"]
- apiGroups:
- "trust.cert-manager.io"
resources:
- "bundles/status"
verbs: ["update"]
- apiGroups:
- ""
resources:
- "configmaps"
verbs: ["get", "list", "create", "update", "watch", "delete"]
- apiGroups:
- ""
resources:
- "namespaces"
verbs: ["get", "list", "watch"]
- apiGroups:
- ""
resources:
- "events"
verbs: ["create", "patch"]
---
# Source: trust-manager/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
name: trust-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: trust-manager
subjects:
- kind: ServiceAccount
name: trust-manager
namespace: cert-manager
---
# Source: trust-manager/templates/role.yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: trust-manager
namespace: cert-manager
labels:
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- "secrets"
verbs:
- "get"
- "list"
- "watch"
- apiGroups:
- "coordination.k8s.io"
resources:
- "leases"
verbs:
- "get"
- "create"
- "update"
- "watch"
- "list"
---
# Source: trust-manager/templates/rolebinding.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: trust-manager
namespace: cert-manager
labels:
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: trust-manager
subjects:
- kind: ServiceAccount
name: trust-manager
namespace: cert-manager
---
# Source: trust-manager/templates/metrics-service.yaml
apiVersion: v1
kind: Service
metadata:
name: trust-manager-metrics
labels:
app: trust-manager
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 9402
targetPort: 9402
protocol: TCP
name: metrics
selector:
app: trust-manager
---
# Source: trust-manager/templates/webhook.yaml
apiVersion: v1
kind: Service
metadata:
name: trust-manager
labels:
app: trust-manager
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 443
targetPort: 6443
protocol: TCP
name: webhook
selector:
app: trust-manager
---
# Source: trust-manager/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: trust-manager
labels:
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app: trust-manager
template:
metadata:
labels:
app: trust-manager
spec:
serviceAccountName: trust-manager
initContainers:
- name: cert-manager-package-debian
image: "quay.io/jetstack/cert-manager-package-debian:20210119.0"
imagePullPolicy: IfNotPresent
args:
- "/copyandmaybepause"
- "/debian-package"
- "/packages"
volumeMounts:
- mountPath: /packages
name: packages
readOnly: false
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- name: trust-manager
image: "quay.io/jetstack/trust-manager:v0.4.0"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 6443
- containerPort: 9402
readinessProbe:
httpGet:
port: 6060
path: /readyz
initialDelaySeconds: 3
periodSeconds: 7
command: ["trust-manager"]
args:
- "--log-level=1"
- "--metrics-port=9402"
- "--readiness-probe-port=6060"
- "--readiness-probe-path=/readyz"
# trust
- "--trust-namespace=cert-manager"
# webhook
- "--webhook-host=0.0.0.0"
- "--webhook-port=6443"
- "--webhook-certificate-dir=/tls"
- "--default-package-location=/packages/cert-manager-package-debian.json"
volumeMounts:
- mountPath: /tls
name: tls
readOnly: true
- mountPath: /packages
name: packages
readOnly: true
resources:
{}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
volumes:
- name: packages
emptyDir: {}
- name: tls
secret:
defaultMode: 420
secretName: trust-manager-tls
---
# Source: trust-manager/templates/certificate.yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: trust-manager
labels:
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
spec:
dnsNames:
- "trust-manager.cert-manager.svc"
secretName: trust-manager-tls
revisionHistoryLimit: 1
issuerRef:
name: trust-manager
kind: Issuer
group: cert-manager.io
---
# Source: trust-manager/templates/certificate.yaml
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: trust-manager
labels:
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
spec:
selfSigned: {}
---
# Source: trust-manager/templates/webhook.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: trust-manager
labels:
app: trust-manager
app.kubernetes.io/name: trust-manager
helm.sh/chart: trust-manager-v0.4.0
app.kubernetes.io/instance: trust-manager
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
annotations:
cert-manager.io/inject-ca-from: "cert-manager/trust-manager"
webhooks:
- name: trust.cert-manager.io
rules:
- apiGroups:
- "trust.cert-manager.io"
apiVersions:
- "*"
operations:
- CREATE
- UPDATE
resources:
- "*/*"
admissionReviewVersions: ["v1"]
timeoutSeconds: 5
failurePolicy: Fail
sideEffects: None
clientConfig:
service:
name: trust-manager
namespace: "cert-manager"
path: /validate

BIN code-server/.DS_Store vendored Normal file

Binary file not shown.

10 code-server/README.md Normal file

@@ -0,0 +1,10 @@
# code server
## online IDE
### in the style of visual studio code
The style has to be set manually after bootstrap.

Improvements to add (a sketch follows below):
- metrics
- liveness probes
- resource limits
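
A minimal sketch of what the liveness probe and resource limits could look like on the code-server container; the `/healthz` path and all values here are illustrative assumptions, not tested settings:

```yaml
# Hypothetical additions to the code-server container in deployment.yaml;
# probe path and resource values are assumptions, tune before use.
livenessProbe:
  httpGet:
    path: /healthz        # assumed health endpoint
    port: 8080
  initialDelaySeconds: 30
  periodSeconds: 15
resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    cpu: "1"
    memory: 1Gi
```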


@@ -0,0 +1,90 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: code-server-csi-hourly
namespace: velero
spec:
schedule: "0 15-22 * * *"
useOwnerReferencesInBackup: true
template:
includedNamespaces: ["code-server"]
ttl: 8h
snapshotVolumes: true
defaultVolumesToFsBackup: false
csiSnapshotTimeout: 10m
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: code-server-csi-daily
namespace: velero
spec:
schedule: "0 0 * * *"
useOwnerReferencesInBackup: true
template:
includedNamespaces: ["code-server"]
ttl: 168h # 7 days
snapshotVolumes: true
defaultVolumesToFsBackup: false
csiSnapshotTimeout: 10m
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: code-server-csi-weekly
namespace: velero
spec:
schedule: "0 0 * * 1"
useOwnerReferencesInBackup: true
template:
includedNamespaces: ["code-server"]
ttl: 730h # ~30 days
snapshotVolumes: true
defaultVolumesToFsBackup: false
csiSnapshotTimeout: 10m
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: code-server-daily
namespace: velero
spec:
schedule: "30 2 * * *" # tous les jours 02:30
useOwnerReferencesInBackup: true
template:
includedNamespaces: [code-server]
storageLocation: default
ttl: 336h # ~14 days
snapshotVolumes: false
defaultVolumesToFsBackup: true
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: code-server-weekly
namespace: velero
spec:
schedule: "0 3 * * 0" # chaque dimanche 03:00
useOwnerReferencesInBackup: true
template:
includedNamespaces: [code-server]
storageLocation: default
ttl: 1344h # ~8 weeks
snapshotVolumes: false
defaultVolumesToFsBackup: true
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
name: code-server-monthly
namespace: velero
spec:
schedule: "0 4 1 * *" # 1er du mois 04:00
useOwnerReferencesInBackup: true
template:
includedNamespaces: [code-server]
storageLocation: default
ttl: 8760h # ~12 months
snapshotVolumes: false
defaultVolumesToFsBackup: true
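# Ad-hoc runs can be triggered from any of these Schedules with the velero CLI
# (assuming it is installed and pointed at this cluster), e.g.:
#   velero backup create --from-schedule=code-server-csi-daily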


@@ -0,0 +1,176 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: startup
namespace: code-server
data:
startup.sh: |
#!/bin/sh
echo "startup..."
sleep 10s
git config --global user.name shodan
git config --global user.email "thrawn235@gmail.com"
if test ! -f "/home/coder/.config/startup.ran"; then
echo "waiting 60s for startup..."
sleep 60s
echo "cloning k8aux-apps"
cd /home/coder/project
git clone https://shodan:$SHODAN_PW@gitea.apps.undercloud.dev/Undercloud/k8s-apps.git
#git clone https://gitea.apps.undercloud.dev/Undercloud/k8s-apps.git
git clone http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure.git
git config --global credential.helper store
echo "create startup.ran file..."
touch /home/coder/.config/startup.ran
else
echo "startup ran already!"
fi
echo "startup done."
#exit 123
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
namespace: code-server
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 64M
storageClassName: cephfs-hyper
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: project
namespace: code-server
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 8G
storageClassName: cephfs-hyper
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: code-server
namespace: code-server
labels:
app: code-server
spec:
replicas: 1
selector:
matchLabels:
app: code-server
template:
metadata:
labels:
app: code-server
spec:
dnsConfig:
options:
- name: ndots
value: "1"
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
containers:
- name: code-server
image: codercom/code-server
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- "/home/coder/startup.sh"
ports:
- containerPort: 8080
- name: metrics
containerPort: 2112
env:
- name: CODER_PROMETHEUS_ENABLE
value: "0.0.0.0:2112"
- name: DOCKER_USER
value: docker
- name: PASSWORD
valueFrom:
secretKeyRef:
name: admin
key: pw
- name: SHODAN_PW
valueFrom:
secretKeyRef:
name: shodan
key: pw
optional: false
volumeMounts:
- mountPath: /home/coder/.config
name: data
subPath: config
- mountPath: /home/coder/.local/share/code-server
name: data
subPath: local
- mountPath: /home/coder/project
name: project
- mountPath: /home/coder/startup.sh
name: startup
subPath: startup.sh
volumes:
- name: data
persistentVolumeClaim:
claimName: data
readOnly: false
- name: project
persistentVolumeClaim:
claimName: project
readOnly: false
- name: startup
configMap:
name: startup
defaultMode: 0777
items:
- key: "startup.sh"
path: "startup.sh"
---
apiVersion: v1
kind: Service
metadata:
name: code-server
namespace: code-server
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: code-server
sessionAffinity: None
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: code-server-metrics
namespace: code-server
labels:
app: code-server-metrics
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: metrics
port: 2112
protocol: TCP
selector:
app: code-server
sessionAffinity: None
type: ClusterIP

25 code-server/ingress.yaml Normal file

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: code-server
namespace: code-server
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts:
- code-server.apps.undercloud.dev
secretName: code-server-tls
rules:
- host: code-server.apps.undercloud.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: code-server
port:
number: 80


@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: code-server
labels:
prometheus: prometheus

17 code-server/secrets.yaml Normal file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Secret
metadata:
name: admin
namespace: code-server
type: Opaque
data:
pw: NElzVGhlTWluZEtpbGxlcg==
---
apiVersion: v1
kind: Secret
metadata:
name: shodan
namespace: code-server
type: Opaque
data:
pw: NElzVGhlTWluZEtpbGxlcg==
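# Note (illustrative, not part of the original manifests): values under `data`
# must be base64-encoded; Kubernetes Secrets also accept plain text under
# `stringData`, which the API server base64-encodes on write, e.g.:
#
#   stringData:
#     pw: <plain-text password>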

Some files were not shown because too many files have changed in this diff.