2025-09-01 21:13:20 +02:00
parent 23ef2ce590
commit 666088c80d
24 changed files with 822 additions and 180 deletions

BIN
.DS_Store vendored

Binary file not shown.

BIN
ceph/.DS_Store vendored Normal file

Binary file not shown.

26
ceph/ceph-conf.yaml Normal file
View File

@@ -0,0 +1,26 @@
---
# This is a sample configmap that helps define a Ceph configuration as required
# by the CSI plugins.
# A sample ceph.conf is available at
# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf
# Detailed documentation is available at
# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
apiVersion: v1
kind: ConfigMap
data:
ceph.conf: |
[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# enable ceph librbd,librados logs in rbd/cephfs/nfs container logs
# log_to_stderr = true
# debug_rbd = 30 # enable debug rbd logs
# debug_rados = 30 # enable debug rados logs
# debug_rbd_mirror = 30 # enable debugging logs for rbd mirroring daemon
# keyring is a required key and its value should be empty
keyring: |
metadata:
name: ceph-config
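
A quick sanity check once this ConfigMap is applied (a sketch; assumes the ceph namespace created by namespace.yaml and a working kubectl context):

# apply the ConfigMap and confirm both data keys (ceph.conf, keyring) are present
kubectl -n ceph apply -f ceph/ceph-conf.yaml
kubectl -n ceph get configmap ceph-config -o jsonpath='{.data}'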

190
ceph/csi-cephfsplugin-provisioner.yaml Normal file
View File

@@ -0,0 +1,190 @@
---
kind: Service
apiVersion: v1
metadata:
name: csi-cephfsplugin-provisioner
labels:
app: csi-metrics
spec:
selector:
app: csi-cephfsplugin-provisioner
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8681
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-cephfsplugin-provisioner
spec:
selector:
matchLabels:
app: csi-cephfsplugin-provisioner
replicas: 3
template:
metadata:
labels:
app: csi-cephfsplugin-provisioner
spec:
#affinity:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app
# operator: In
# values:
# - csi-cephfsplugin-provisioner
# topologyKey: "kubernetes.io/hostname"
serviceAccountName: cephfs-csi-provisioner
priorityClassName: system-cluster-critical
containers:
- name: csi-cephfsplugin
image: quay.io/cephcsi/cephcsi:v3.12.0
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--controllerserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=cephfs.csi.ceph.com"
- "--pidlimit=-1"
- "--enableprofiling=false"
- "--setmetadata=true"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# - name: KMS_CONFIGMAP_NAME
# value: encryptionConfig
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: host-sys
mountPath: /sys
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-csi-encryption-kms-config
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--leader-election=true"
- "--retry-interval-start=500ms"
- "--feature-gates=HonorPVReclaimPolicy=true"
- "--prevent-volume-mode-conversion=true"
- "--extra-create-metadata=true"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--leader-election"
- "--retry-interval-start=500ms"
- "--handle-volume-inuse-error=false"
- "--feature-gates=RecoverVolumeExpansionFailure=true"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--leader-election=true"
- "--extra-create-metadata=true"
- "--enable-volume-group-snapshots=false"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: liveness-prometheus
image: quay.io/cephcsi/cephcsi:v3.12.0
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8681"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: ceph-config
configMap:
name: ceph-config
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
- name: ceph-csi-encryption-kms-config
configMap:
name: ceph-csi-encryption-kms-config
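
Once applied, the provisioner's health can be checked along these lines (a sketch; assumes everything lands in the ceph namespace):

# wait for the 3 replicas, then confirm the metrics Service has endpoints on 8681
kubectl -n ceph rollout status deployment/csi-cephfsplugin-provisioner --timeout=300s
kubectl -n ceph get endpoints csi-cephfsplugin-provisioner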

210
ceph/csi-cephfsplugin.yaml Normal file
View File

@@ -0,0 +1,210 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-cephfsplugin
spec:
selector:
matchLabels:
app: csi-cephfsplugin
template:
metadata:
labels:
app: csi-cephfsplugin
spec:
serviceAccountName: cephfs-csi-nodeplugin
priorityClassName: system-node-critical
hostNetwork: true
hostPID: true
# To use, e.g., a Rook-orchestrated cluster where the mons' FQDNs are
# resolved through a k8s Service, set the DNS policy to ClusterFirstWithHostNet.
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: csi-cephfsplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/cephcsi/cephcsi:v3.12.0
args:
- "--nodeid=$(NODE_ID)"
- "--type=cephfs"
- "--nodeserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=5"
- "--drivername=cephfs.csi.ceph.com"
- "--enableprofiling=false"
# If topology based provisioning is desired, configure required
# node labels representing the nodes topology domain
# and pass the label names below, for CSI to consume and advertise
# its equivalent topology domain
# - "--domainlabels=failure-domain/region,failure-domain/zone"
#
# Options to enable read affinity.
# If enabled, Ceph CSI will fetch labels from the Kubernetes node and
# pass `read_from_replica=localize,crush_location=type:value` during
# the CephFS mount command. Refer to
# https://docs.ceph.com/en/latest/man/8/rbd/#kernel-rbd-krbd-options
# for more details.
# - "--enable-read-affinity=true"
# - "--crush-location-labels=topology.io/zone,topology.io/rack"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# - name: KMS_CONFIGMAP_NAME
# value: encryptionConfig
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: mountpoint-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: Bidirectional
- name: plugin-dir
mountPath: /var/lib/kubelet/plugins
mountPropagation: "Bidirectional"
- name: host-sys
mountPath: /sys
- name: etc-selinux
mountPath: /etc/selinux
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: host-dev
mountPath: /dev
- name: host-mount
mountPath: /run/mount
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-csi-mountinfo
mountPath: /csi/mountinfo
- name: ceph-csi-encryption-kms-config
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: driver-registrar
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access the unix domain socket
# created by the privileged CSI driver container.
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1
args:
- "--v=1"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-prometheus
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: quay.io/cephcsi/cephcsi:v3.12.0
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8681"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins
type: Directory
- name: host-sys
hostPath:
path: /sys
- name: etc-selinux
hostPath:
path: /etc/selinux
- name: lib-modules
hostPath:
path: /lib/modules
- name: host-dev
hostPath:
path: /dev
- name: host-mount
hostPath:
path: /run/mount
- name: ceph-config
configMap:
name: ceph-config
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
- name: ceph-csi-mountinfo
hostPath:
path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/mountinfo
type: DirectoryOrCreate
- name: ceph-csi-encryption-kms-config
configMap:
name: ceph-csi-encryption-kms-config
---
# This is a service to expose the liveness metrics
apiVersion: v1
kind: Service
metadata:
name: csi-metrics-cephfsplugin
labels:
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8681
selector:
app: csi-cephfsplugin
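
Per-node registration can be verified once the DaemonSet is up (a sketch; assumes the ceph namespace):

# one pod per node; the driver name should appear in each node's CSINode object
kubectl -n ceph rollout status daemonset/csi-cephfsplugin --timeout=300s
kubectl get csinodes -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.drivers[*].name}{"\n"}{end}'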

27
ceph/csi-config-map.yaml Normal file
View File

@@ -0,0 +1,27 @@
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-csi-config
namespace: ceph
data:
config.json: |-
[
{
"clusterID": "de115bee-2527-45a0-b0e8-50c30be4a907",
"monitors": [
"[fd00:0:0:2::61]:6789",
"[fd00:0:0:2::62]:6789",
"[fd00:0:0:2::63]:6789"
],
"cephFS": {
"subvolumeGroup": "csi"
}
}
]
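
The clusterID must be the Ceph FSID and the monitor list must match the live cluster; both can be cross-checked from a Ceph admin host (a sketch):

# run on a node with the ceph CLI and an admin keyring
ceph fsid
ceph mon dump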

7
ceph/csi-encryption-kms-config.yaml Normal file
View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-csi-encryption-kms-config
namespace: ceph
data:
config.json: "{}"

41
ceph/csi-nodeplugin-rbac.yaml Normal file
View File

@@ -0,0 +1,41 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cephfs-csi-nodeplugin
namespace: ceph
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: cephfs-csi-nodeplugin
# replace with non-default namespace name
namespace: ceph
roleRef:
kind: ClusterRole
name: cephfs-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
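
A quick impersonation check that this binding grants what the node plugin needs (a sketch):

# both should print "yes"
kubectl auth can-i get nodes --as=system:serviceaccount:ceph:cephfs-csi-nodeplugin
kubectl auth can-i create serviceaccounts/token --as=system:serviceaccount:ceph:cephfs-csi-nodeplugin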

122
ceph/csi-provisioner-rbac.yaml Normal file
View File

@@ -0,0 +1,122 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cephfs-csi-provisioner
namespace: ceph
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update", "patch", "create"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["get", "list", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list", "watch", "update", "patch", "create"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotcontents"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotcontents/status"]
verbs: ["update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: cephfs-csi-provisioner
namespace: ceph
roleRef:
kind: ClusterRole
name: cephfs-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# replace with non-default namespace name
namespace: ceph
name: cephfs-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cephfs-csi-provisioner-role-cfg
# replace with non-default namespace name
namespace: ceph
subjects:
- kind: ServiceAccount
name: cephfs-csi-provisioner
# replace with non-default namespace name
namespace: ceph
roleRef:
kind: Role
name: cephfs-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
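
The same kind of spot check works for the provisioner, including the lease access used for leader election (a sketch):

# PV management comes from the ClusterRole, lease updates from the namespaced Role
kubectl auth can-i create persistentvolumes --as=system:serviceaccount:ceph:cephfs-csi-provisioner
kubectl auth can-i update leases.coordination.k8s.io -n ceph --as=system:serviceaccount:ceph:cephfs-csi-provisioner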

17
ceph/csidriver.yaml Normal file
View File

@@ -0,0 +1,17 @@
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: "cephfs.csi.ceph.com"
spec:
attachRequired: false
podInfoOnMount: false
fsGroupPolicy: File
seLinuxMount: true
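
attachRequired: false tells kubelet to skip the VolumeAttachment flow, so no external-attacher sidecar is deployed. Registration is cluster-scoped and easy to confirm:

kubectl get csidriver cephfs.csi.ceph.com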

4
ceph/namespace.yaml Normal file
View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: ceph

24
ceph/secrets.yaml Normal file
View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Secret
metadata:
name: cephfs-provisioner-secret
namespace: ceph
type: Opaque
stringData:
adminID: k8s-csi
adminKey: "AQAcc59oml/CHRAA9xzJ6z3wkaz+0lz6Awqofg=="
#userID: k8s-csi
#userKey: "AQAcc59oml/CHRAA9xzJ6z3wkaz+0lz6Awqofg=="
---
apiVersion: v1
kind: Secret
metadata:
name: cephfs-node-secret
namespace: ceph
type: Opaque
stringData:
userID: k8s-csi
userKey: "AQAcc59oml/CHRAA9xzJ6z3wkaz+0lz6Awqofg=="
adminID: k8s-csi
adminKey: "AQAcc59oml/CHRAA9xzJ6z3wkaz+0lz6Awqofg=="
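
The cephx user behind these credentials can be created on the Ceph side roughly as follows (a sketch; the provisioner needs broader capabilities than fs authorize grants, so check the ceph-csi capabilities doc for the exact set):

# grant rw on the cephfs filesystem, then extract the key for the Secret
ceph fs authorize cephfs client.k8s-csi / rw
ceph auth get-key client.k8s-csi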

33
ceph/storage-class.yaml Normal file
View File

@@ -0,0 +1,33 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: cephfs-hyper
provisioner: cephfs.csi.ceph.com
parameters:
# Ceph cluster ID (FSID)
clusterID: "de115bee-2527-45a0-b0e8-50c30be4a907"
# List of MONs (v4, v6, or both)
# Format: "ip1:port,ip2:port,[v6-addr]:port"
monitors: "[fd00:0:0:2::61]:6789,[fd00:0:0:2::62]:6789,[fd00:0:0:2::63]:6789"
# CephFS filesystem name and subvolume group
fsName: "cephfs"
subvolumeGroup: "csi"
# Secrets used by the driver
csi.storage.k8s.io/provisioner-secret-name: cephfs-provisioner-secret
csi.storage.k8s.io/provisioner-secret-namespace: ceph
csi.storage.k8s.io/controller-expand-secret-name: cephfs-provisioner-secret
csi.storage.k8s.io/controller-expand-secret-namespace: ceph
csi.storage.k8s.io/node-stage-secret-name: cephfs-node-secret
csi.storage.k8s.io/node-stage-secret-namespace: ceph
# (optional) mounter: kernel|fuse
# mounter: kernel
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
- rw
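
A minimal PVC against this class makes a useful smoke test (a sketch; the claim name and size are arbitrary):

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-hyper-test
spec:
  accessModes: [ReadWriteMany]
  resources:
    requests:
      storage: 1Gi
  storageClassName: cephfs-hyper
EOF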

gitea/db.yaml
View File

@@ -1,32 +1,3 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: db
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: gitea
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: gitea-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -38,7 +9,7 @@ spec:
resources:
requests:
storage: 16Gi
storageClassName: db
storageClassName: cephfs-hyper
---
apiVersion: apps/v1
kind: Deployment
@@ -129,35 +100,6 @@ spec:
sessionAffinity: None
type: ClusterIP
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: db-backup
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: gitea
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: gitea-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -169,7 +111,7 @@ spec:
resources:
requests:
storage: 10Gi
storageClassName: db-backup
storageClassName: cephfs-hyper
---
apiVersion: apps/v1
kind: Deployment
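
After this change both PVCs provision from cephfs-hyper instead of the removed Rook classes; binding can be confirmed with (a sketch, assuming the gitea namespace):

kubectl -n gitea get pvc -o wide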

View File

@@ -1,42 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: gitea
namespace: rook-ceph
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPools:
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: false
metadataServer:
activeCount: 1
activeStandby: true
placement:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - mds-node
tolerations:
- key: node-role.kubernetes.io/storage-node
operator: Exists
effect: NoSchedule
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
#resources:
# limits:
# cpu: "80m"
# memory: "1024Mi"
# requests:
# cpu: "500m"
# memory: "1024Mi"

gitea/gitea.yaml
View File

@@ -27,7 +27,7 @@ data:
DOMAIN = localhost
SSH_DOMAIN = localhost
HTTP_PORT = 3000
ROOT_URL = https://gitea.undercloud.cf/
ROOT_URL = https://gitea.apps.undercloud.dev/
DISABLE_SSH = false
SSH_PORT = 22
SSH_LISTEN_PORT = 22
@@ -127,14 +127,14 @@ data:
echo $ARGOCD_PW
su git -c 'echo $ARGOCD_PW'
su git -c 'SHODAN_PW=`cat /data/shodan.pw` && gitea admin user create --username shodan --admin --password $SHODAN_PW --email thrawn235@gmail.com'
su git -c 'ARGOCD_PW=`cat /data/argocd.pw` && gitea admin user create --username argocd --password $ARGOCD_PW --email argocd@undercloud.cf --must-change-password=false'
su git -c 'GITEA_PW=`cat /data/gitea.pw` && gitea admin auth add-ldap --name ldap --security-protocol StartTLS --host ldap.undercloud.cf. --port 389 --user-search-base "ou=users,dc=undercloud,dc=cf" --user-filter "(&(objectClass=person)(uid=%s))" --admin-filter "(&(memberOf=cn=gitea-admins,ou=groups,dc=undercloud,dc=cf))" --email-attribute mail --avatar-attribute jpegPhoto --synchronize-users --skip-tls-verify --username-attribute uid --bind-dn "cn=gitea,ou=serviceaccounts,ou=users,dc=undercloud,dc=cf" --bind-password $GITEA_PW --attributes-in-bind --firstname-attribute cn --surname-attribute sn'
su git -c 'ARGOCD_PW=`cat /data/argocd.pw` && gitea admin user create --username argocd --password $ARGOCD_PW --email argocd@undercloud.local --must-change-password=false'
su git -c 'GITEA_PW=`cat /data/gitea.pw` && gitea admin auth add-ldap --name ldap --security-protocol StartTLS --host ldap.undercloud.local. --port 389 --user-search-base "ou=users,dc=undercloud,dc=cf" --user-filter "(&(objectClass=person)(uid=%s))" --admin-filter "(&(memberOf=cn=gitea-admins,ou=groups,dc=undercloud,dc=cf))" --email-attribute mail --avatar-attribute jpegPhoto --synchronize-users --skip-tls-verify --username-attribute uid --bind-dn "cn=gitea,ou=serviceaccounts,ou=users,dc=undercloud,dc=cf" --bind-password $GITEA_PW --attributes-in-bind --firstname-attribute cn --surname-attribute sn'
sleep 30s
echo "wget tea..."
wget http://aux-balancer.undercloud.cf.:3000/undercloud/kube-binaries/raw/branch/main/tea
wget https://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/tea
echo "wget ctea..."
wget http://aux-balancer.undercloud.cf.:3000/undercloud/kube-binaries/raw/branch/main/ctea
wget https://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/ctea
chmod +x tea
chmod +x ctea
#echo "using tea to create login..."
@@ -152,7 +152,7 @@ data:
./ctea --username shodan --password $SHODAN_PW --url http://localhost:3000 AddUserToTeam undercloud undercloud argocd
sleep 5s
echo "cloning k8aux-apps"
execline-cd /data git clone http://aux-balancer.undercloud.cf.:3000/undercloud/k8aux-apps.git
execline-cd /data git clone http://git.undercloud.local:3000/undercloud/k8aux-apps.git
execline-cd /data/k8aux-apps rm -Rf .git
execline-cd /data/k8aux-apps git init
execline-cd /data/k8aux-apps git config --global user.email "thrawn235@gmail.com"
@@ -174,35 +174,6 @@ data:
echo "startup done."
#exit 123
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: gitea
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph
# CephFS filesystem name into which the volume shall be created
fsName: gitea
# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: gitea-replicated
# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -214,7 +185,7 @@ spec:
resources:
requests:
storage: 32Gi
storageClassName: gitea
storageClassName: cephfs-hyper
---
apiVersion: apps/v1
kind: Deployment
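
Whether the running pod picked up the new ROOT_URL can be checked in the rendered config (a sketch; the app.ini path is the usual Gitea container default, assumed here):

kubectl -n gitea exec deploy/gitea -- grep ROOT_URL /data/gitea/conf/app.ini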

View File

@@ -10,10 +10,10 @@ metadata:
spec:
tls:
- hosts:
- gitea.undercloud.cf
- gitea.apps.undercloud.dev
secretName: gitea-tls
rules:
- host: gitea.undercloud.cf
- host: gitea.apps.undercloud.dev
http:
paths:
- path: /

View File

@@ -129,27 +129,27 @@ storage:
- path: /opt/bin/kubeadm
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
- path: /opt/bin/kubelet
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
- path: /opt/bin/kubectl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
- path: /opt/bin/calicoctl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
- path: /opt/bin/velero
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/velero"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/velero"
- path: /etc/kubernetes/kubeadm-init.yaml
mode: 0644
@@ -373,21 +373,21 @@ systemd:
ExecStart=/bin/sh -c 'echo "witing 30s..."'
ExecStart=/bin/sleep 30s
ExecStart=/bin/sh -c 'echo "create calico namespace..."'
ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml
ExecStart=/bin/sh -c 'echo "install tigera operator..."'
ExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml
ExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml
ExecStart=-/opt/bin/kubectl create -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml
ExecStart=-/opt/bin/kubectl create -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml
ExecStart=/bin/sh -c 'echo "witing 60s..."'
ExecStart=/bin/sleep 60s
ExecStart=/bin/sh -c 'echo "witing for tigera operator... (20mini max)"'
ExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s
ExecStart=/bin/sh -c 'echo "create clico custom ressources..."'
ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml
ExecStart=/bin/sh -c 'echo "witing 3m.."'
ExecStart=/bin/sleep 3m
#ExecStart=/bin/sh -c 'echo "apply calico (calico-apiserver)..."'
#ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml
#ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml
#ExecStart=/bin/sh -c 'echo "witing 1m..."'
#ExecStart=/bin/sleep 2m
ExecStart=/bin/sh -c 'echo "witing calico-apiserver... (20mini max)"'
@@ -395,11 +395,11 @@ systemd:
ExecStart=/bin/sh -c 'echo "witing 120s..."'
ExecStart=/bin/sleep 2m
ExecStart=/bin/sh -c 'echo "apply calico-peers..."'
ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml
ExecStart=/bin/sh -c 'echo "witing 60s..."'
ExecStart=/bin/sleep 1m
ExecStart=/bin/sh -c 'echo "apply calico-ippools..."'
ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml
#ExecStart=/bin/sh -c 'echo "witing for whisker.."'
#ExecStart=/opt/bin/kubectl wait deployment -n calico-system whisker --for condition=Available=True --timeout=1200s
@@ -412,6 +412,76 @@ systemd:
RestartSec=120s
[Install]
WantedBy=multi-user.target
- name: install-ceph.service
enabled: true
contents: |
[Unit]
Wants=kubeadm-init.service
After=kubeadm-init.service
[Service]
StandardOutput=journal+console
StandardError=journal+console
ExecStart=/bin/sh -c 'echo "install-ceph.service started..."'
Environment=KUBECONFIG=/etc/kubernetes/admin.conf
Environment=DATASTORE_TYPE=kubernetes
Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin
Type=oneshot
ExecStart=/bin/sh -c 'echo "waiting 30s..."'
ExecStart=/bin/sleep 30s
ExecStart=/bin/sh -c 'echo "create ceph namespace..."'
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/namespace.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/ceph-conf.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/csi-cephfsplugin-provisioner.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/csi-cephfsplugin.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/csi-config-map.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/csi-encryption-kms-config.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/csi-nodeplugin-rbac.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/csi-provisioner-rbac.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/csidriver.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/secrets.yaml
ExecStart=-/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/ceph/storage-class.yaml
ExecStart=/usr/bin/systemctl disable install-ceph.service
#RemainAfterExit=true
Restart=on-failure
RestartSec=120s
[Install]
WantedBy=multi-user.target
- name: install-gitea.service
enabled: true
contents: |
[Unit]
Wants=install-ceph.service
After=install-ceph.service
[Service]
StandardOutput=journal+console
StandardError=journal+console
Environment=KUBECONFIG=/etc/kubernetes/admin.conf
Environment=DATASTORE_TYPE=kubernetes
Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin
Type=oneshot
ExecStart=/opt/bin/kubectl wait deployment -n ceph csi-cephfsplugin-provisioner --for condition=Available=True --timeout=1200s
ExecStart=/bin/sleep 4m
ExecStart=/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/gitea/namespace.yaml
ExecStart=/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/gitea/secrets.yaml
ExecStart=/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/gitea/db.yaml
ExecStart=/bin/sleep 60s
ExecStart=/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/gitea/adminer.yaml
ExecStart=/opt/bin/kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/gitea/gitea.yaml
ExecStart=/bin/sleep 3m
ExecStart=/opt/bin/kubectl exec deploy/gitea -n gitea -- /bin/startup.sh
ExecStart=/usr/bin/systemctl disable install-gitea.service
Restart=on-failure
RestartSec=120s
[Install]
WantedBy=multi-user.target
- name: install-argocd.service
enabled: true
contents: |
@@ -430,16 +500,16 @@ systemd:
ExecStart=/opt/bin/kubectl wait deployment -n kube-system coredns --for condition=Available=True --timeout=600s
ExecStart=/bin/sleep 1m
ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml
ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/install.yaml
ExecStart=/opt/bin/kubectl apply -n argocd -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml
ExecStart=/opt/bin/kubectl apply -n argocd -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/install.yaml
ExecStart=/opt/bin/kubectl wait deployment -n argocd argocd-server --for condition=Available=True --timeout=600s
ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/repo.yaml
ExecStart=/opt/bin/kubectl apply -n argocd -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/repo.yaml
ExecStart=/bin/sleep 10s
ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/apps.yaml
ExecStart=/opt/bin/kubectl apply -n argocd -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/apps.yaml
ExecStart=/bin/sleep 10s
ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/ingress.yaml
ExecStart=/opt/bin/kubectl apply -n argocd -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/ingress.yaml
#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-bootstrap.yaml
@@ -487,9 +557,9 @@ systemd:
kubectl -n kube-system delete svc kube-dns --ignore-not-found ; \
kubectl apply -f /etc/kubernetes/addons/kube-dns-fixed-svc.yaml ; \
kubectl -n argocd delete svc argocd-server --ignore-not-found ; \
kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/service.yaml ; \
kubectl apply -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/service.yaml ; \
kubectl -n calico-system delete svc whisker --ignore-not-found || true ; \
kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/whisker.yaml || true ; \
kubectl create -f http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/whisker.yaml || true ; \
echo "[pin-service-ips] done." \
'
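
Before rolling this out, the Butane config can be validated and transpiled locally (a sketch; the file name is a placeholder):

# --strict fails on unknown keys instead of silently ignoring them
butane --strict --pretty controller.bu > controller.ign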

View File

@@ -123,22 +123,22 @@ storage:
- path: /opt/bin/kubeadm
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
- path: /opt/bin/kubelet
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
- path: /opt/bin/kubectl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
- path: /opt/bin/calicoctl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
- path: /etc/kubernetes/kubeadm-join.yaml
mode: 0644

View File

@@ -123,22 +123,22 @@ storage:
- path: /opt/bin/kubeadm
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
- path: /opt/bin/kubelet
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
- path: /opt/bin/kubectl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
- path: /opt/bin/calicoctl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
- path: /etc/kubernetes/kubeadm-join.yaml
mode: 0644

View File

@@ -35,7 +35,7 @@ resource "proxmox_virtual_environment_download_file" "flatcar_image" {
datastore_id = "cephfs" # or your ISO storage
node_name = "hyper1"
url = "http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/iso/flatcar_production_proxmoxve_image.img"
url = "http://git.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/iso/flatcar_production_proxmoxve_image.img"
file_name = "flatcar_production_proxmoxve_image.qcow2" # stored as an ISO
}
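
The URL swap can be previewed without touching other resources (a sketch):

terraform init
terraform plan -target=proxmox_virtual_environment_download_file.flatcar_image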

View File

@@ -119,22 +119,22 @@ storage:
- path: /opt/bin/kubeadm
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
- path: /opt/bin/kubelet
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
- path: /opt/bin/kubectl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
- path: /opt/bin/calicoctl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
- path: /etc/kubernetes/kubeadm-join.yaml
mode: 0644

View File

@@ -119,22 +119,22 @@ storage:
- path: /opt/bin/kubeadm
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
- path: /opt/bin/kubelet
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
- path: /opt/bin/kubectl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
- path: /opt/bin/calicoctl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
- path: /etc/kubernetes/kubeadm-join.yaml
mode: 0644

View File

@@ -119,22 +119,22 @@ storage:
- path: /opt/bin/kubeadm
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
- path: /opt/bin/kubelet
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
- path: /opt/bin/kubectl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
- path: /opt/bin/calicoctl
mode: 0755
contents:
source: "http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
source: "http://git.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"
- path: /etc/kubernetes/kubeadm-join.yaml
mode: 0644