---
# Butane config (Flatcar variant) for Kubernetes control-plane node 1
# (control-plane1, fd00:0:0:2::91 / 10.0.2.91).
# Render with: butane --strict < this-file > ignition.json
variant: flatcar
version: 1.1.0

passwd:
  users:
    - name: core
      ssh_authorized_keys:
        - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud"

storage:
  directories:
    - path: /opt/bin
      overwrite: true
      mode: 0755
    - path: /opt/cni/bin
      overwrite: true
      # FIX: was "mode: 755" (decimal 755 = 0o1363 — bogus permission bits);
      # octal 0755 matches every other directory entry in this file.
      mode: 0755
    - path: /etc/kubernetes/manifests
      # overwrite: true
      mode: 0755
    - path: /etc/install-calico
      overwrite: true
      mode: 0755

  files:
    - path: /etc/hostname
      mode: 0644
      contents:
        inline: |
          control-plane1

    # Dual-stack static addressing for all eth* interfaces.
    - path: /etc/systemd/network/00-eth.network
      mode: 0644
      contents:
        inline: |
          [Match]
          Name=eth*

          [Network]
          Address=fd00:0:0:2::91/64
          Gateway=fd00:0:0:2::3
          DNS=fd00:0:0:1::1
          Address=10.0.2.91/24
          Gateway=10.0.2.3
          DNS=10.0.1.1
          Domains=undercloud.local
          IPv6AcceptRA=no
          IPv6PrivacyExtensions=no

    # Static name resolution for all cluster nodes (v6 + v4).
    - path: /etc/hosts
      mode: 0644
      overwrite: true
      contents:
        inline: |
          127.0.0.1 localhost
          ::1 localhost ip6-localhost ip6-loopback
          fd00:0:0:2::91 control-plane1.undercloud.local control-plane1
          fd00:0:0:2::92 control-plane2.undercloud.local control-plane2
          fd00:0:0:2::93 control-plane3.undercloud.local control-plane3
          fd00:0:0:2::101 worker1.undercloud.local worker1
          fd00:0:0:2::102 worker2.undercloud.local worker2
          fd00:0:0:2::103 worker3.undercloud.local worker3
          10.0.2.91 control-plane1.undercloud.local control-plane1
          10.0.2.92 control-plane2.undercloud.local control-plane2
          10.0.2.93 control-plane3.undercloud.local control-plane3
          10.0.2.101 worker1.undercloud.local worker1
          10.0.2.102 worker2.undercloud.local worker2
          10.0.2.103 worker3.undercloud.local worker3

    - path: /etc/motd
      mode: 0644
      overwrite: true
      contents:
        inline: |
          *******************************************************************
          *                     AUTHORIZED ACCESS ONLY                      *
          *                                                                 *
          * This system is part of a secured infrastructure.                *
          * All activities are monitored and logged.                        *
          * Unauthorized access or misuse is strictly prohibited and       *
          * may result in disciplinary and legal action.                    *
          *******************************************************************
          --------------------------------------------------------------------------------
          kubernetes control plane Node

          Manage via:
            kubectl          (kubectl)
            calico           (calicoctl)
            velero - backup  (velero)
            argocd           https://argocd-server.argocd.svc.k8aux.undercloud.cf/
          --------------------------------------------------------------------------------

    # Kernel parameters required by kubelet / kube-proxy / Calico.
    - path: /etc/sysctl.d/99-k8s.conf
      mode: 0644
      contents:
        inline: |
          net.ipv4.ip_forward = 1
          net.ipv6.ip_forward = 1
          net.ipv6.conf.all.forwarding = 1
          net.ipv4.conf.all.forwarding = 1
          net.bridge.bridge-nf-call-iptables = 1
          net.bridge.bridge-nf-call-ip6tables = 1
          net.netfilter.nf_conntrack_max = 1000000
          net.ipv4.conf.all.rp_filter = 0
          net.ipv6.conf.all.disable_ipv6 = 0
          vm.overcommit_memory = 1
          fs.inotify.max_user_watches = 524288
          fs.inotify.max_user_instances = 512
          kernel.panic = 10
          kernel.panic_on_oops = 1

    # Disable Flatcar auto-reboot on updates (kubeadm-managed node).
    - path: /etc/flatcar/update.conf
      overwrite: true
      # FIX: was "mode: 0420" — YAML octal 0420 yields r---w---- permissions.
      # The author almost certainly meant decimal 420 (= 0644, the value the
      # Ignition JSON examples use). Fixed to octal 0644.
      mode: 0644
      contents:
        inline: |
          REBOOT_STRATEGY=off

    # Kubernetes binaries, served from the local build node.
    - path: /opt/bin/kubeadm
      mode: 0755
      contents:
        source: "http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm"
    - path: /opt/bin/kubelet
      mode: 0755
      contents:
        source: "http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet"
    - path: /opt/bin/kubectl
      mode: 0755
      contents:
        source: "http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl"
    - path: /opt/bin/calicoctl
      mode: 0755
      contents:
        source: "http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl"

    # kubeadm init configuration: dual-stack pod/service CIDRs, VIP endpoint.
    - path: /etc/kubernetes/kubeadm-init.yaml
      mode: 0644
      contents:
        inline: |
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: InitConfiguration
          bootstrapTokens:
            - token: "kvg1hc.t3rewovrps426rof"
              description: "default kubeadm bootstrap token"
              ttl: "0"
          nodeRegistration:
            name: control-plane1
            criSocket: unix:///run/containerd/containerd.sock
            kubeletExtraArgs:
              node-ip: "fd00:0:0:2::91"
              cluster-dns: "10.0.91.53,fd00:0:0:f:1::53"
              volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
          localAPIEndpoint:
            advertiseAddress: "fd00:0:0:2::91"
            bindPort: 6443
          certificateKey: "fee7c3e5cfcac7e4774c6efca0464a42d897f30f7300340d6578b5cfb4a3d34b"
          ---
          apiVersion: kubeadm.k8s.io/v1beta3
          kind: ClusterConfiguration
          controlPlaneEndpoint: "[fd00:0:0:2::100]:6443"
          networking:
            podSubnet: "fd00:0:0:a::/64,10.0.10.0/24"
            serviceSubnet: "fd00:0:0:f:1::/108,10.0.91.0/24"
            dnsDomain: "k8s.undercloud.local"
          controllerManager:
            extraArgs:
              flex-volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
              bind-address: "::"
          ---
          apiVersion: kubelet.config.k8s.io/v1beta1
          kind: KubeletConfiguration
          address: "::"
          healthzBindAddress: "::"
          clusterDomain: "k8s.undercloud.local"
          volumePluginDir: /opt/libexec/kubernetes/kubelet-plugins/volume/exec
          cgroupDriver: "systemd"

    # Replacement kube-dns Service with pinned dual-stack ClusterIPs
    # (applied later by pin-service-ips.service).
    - path: /etc/kubernetes/addons/kube-dns-fixed-svc.yaml
      mode: 0644
      contents:
        inline: |
          apiVersion: v1
          kind: Service
          metadata:
            name: kube-dns
            namespace: kube-system
            labels:
              k8s-app: kube-dns
          spec:
            type: ClusterIP
            ipFamilyPolicy: RequireDualStack
            ipFamilies: [IPv4, IPv6]
            clusterIP: 10.0.91.53
            clusterIPs:
              - fd00:0:0:f:1::53
              - 10.0.91.53
            ports:
              - name: dns
                port: 53
                protocol: UDP
                targetPort: 53
              - name: dns-tcp
                port: 53
                protocol: TCP
                targetPort: 53
              - name: metrics
                port: 9153
                protocol: TCP
                targetPort: 9153
            selector:
              k8s-app: kube-dns

systemd:
  units:
    # Load kernel modules needed by containerd/kube-proxy before they start.
    - name: modules-load.service
      enabled: true
      contents: |
        [Unit]
        Description=Load necessary kernel modules
        Before=containerd.service kubeadm-init.service

        [Service]
        Type=oneshot
        ExecStart=/usr/bin/modprobe br_netfilter
        ExecStart=/usr/bin/modprobe overlay
        RemainAfterExit=yes

        [Install]
        WantedBy=multi-user.target

    - name: systemd-networkd-wait-online.service
      enabled: true

    - name: containerd.service
      enabled: true
      contents: |
        [Unit]
        Description=containerd container runtime
        After=network.target modules-load.service

        [Service]
        #StandardOutput=journal+console
        #StandardError=journal+console
        ExecStart=/usr/bin/containerd
        Restart=always
        RestartSec=5
        Delegate=yes
        KillMode=process
        OOMScoreAdjust=-999

        [Install]
        WantedBy=multi-user.target

    # NOTE(review): duplicated StandardOutput/StandardError directives removed
    # (systemd takes the last occurrence; the duplicates were no-ops).
    - name: set-timezone.service
      enabled: true
      contents: |
        [Unit]
        Description=Set Timezone
        After=network-online.target
        Wants=network-online.target

        [Service]
        Type=oneshot
        StandardOutput=journal+console
        StandardError=journal+console
        Restart=on-failure
        ExecStart=/bin/sh -c 'echo "setting timezone to Europe/Berlin"'
        ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin
        ExecStart=/usr/bin/timedatectl set-ntp true

        [Install]
        WantedBy=multi-user.target

    - name: kubelet.service
      enabled: true
      contents: |
        [Unit]
        Description=kubelet, the Kubernetes Node Agent
        Documentation=https://kubernetes.io/docs/home
        Wants=network-online.target
        After=network-online.target

        [Service]
        #StandardOutput=journal+console
        #StandardError=journal+console
        #EnvironmentFile=/run/metadata/coreos
        Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
        Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
        # This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
        EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
        ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
        Restart=always
        StartLimitInterval=0
        RestartSec=10

        [Install]
        WantedBy=multi-user.target

    # One-shot cluster bootstrap; disables itself after a successful init.
    - name: kubeadm-init.service
      enabled: true
      contents: |
        [Unit]
        Description=Kubeadm Init Cluster
        After=network-online.target containerd.service kubelet.service
        Wants=network-online.target
        ConditionPathExists=!/etc/kubernetes/kubelet.conf

        [Service]
        Type=oneshot
        StandardOutput=journal+console
        StandardError=journal+console
        Environment=KUBECONFIG=/etc/kubernetes/admin.conf
        Environment=DATASTORE_TYPE=kubernetes
        Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/
        ExecStart=/bin/sh -c 'echo "kubeadm-init.service started..."'
        ExecStartPre=/bin/sleep 30s
        ExecStart=/bin/sh -c 'echo "running kubeadm init..."'
        ExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml
        # copy files for kubectl
        ExecStart=/bin/sh -c 'echo "copying files (admin.conf) to core home folder."'
        ExecStartPost=/usr/bin/mkdir -p /home/core/.kube
        ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config
        ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config
        ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
        Restart=on-failure
        RestartSec=120s

        [Install]
        WantedBy=multi-user.target

    # One-shot Calico install via Tigera operator; disables itself when done.
    # "ExecStart=-" entries tolerate failure (e.g. resource already exists).
    - name: install-calico.service
      enabled: true
      contents: |
        [Unit]
        Description=Install Calico CNI via Tigera operator
        Wants=kubeadm-init.service
        After=kubeadm-init.service

        [Service]
        Type=oneshot
        StandardOutput=journal+console
        StandardError=journal+console
        Environment=KUBECONFIG=/etc/kubernetes/admin.conf
        Environment=DATASTORE_TYPE=kubernetes
        Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin
        ExecStart=/bin/sh -c 'echo "install-calico.service started..."'
        ExecStart=/bin/sh -c 'echo "waiting 30s..."'
        ExecStart=/bin/sleep 30s
        ExecStart=/bin/sh -c 'echo "create calico namespace..."'
        ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml
        ExecStart=/bin/sh -c 'echo "install tigera operator..."'
        ExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml
        ExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml
        ExecStart=/bin/sh -c 'echo "waiting 60s..."'
        ExecStart=/bin/sleep 60s
        ExecStart=/bin/sh -c 'echo "waiting for tigera operator... (20min max)"'
        ExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s
        ExecStart=/bin/sh -c 'echo "create calico custom resources..."'
        ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml
        ExecStart=/bin/sh -c 'echo "waiting 3m..."'
        ExecStart=/bin/sleep 3m
        #ExecStart=/bin/sh -c 'echo "apply calico (calico-apiserver)..."'
        #ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml
        #ExecStart=/bin/sh -c 'echo "waiting 1m..."'
        #ExecStart=/bin/sleep 2m
        ExecStart=/bin/sh -c 'echo "waiting for calico-apiserver... (20min max)"'
        ExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s
        ExecStart=/bin/sh -c 'echo "waiting 120s..."'
        ExecStart=/bin/sleep 2m
        ExecStart=/bin/sh -c 'echo "apply calico-peers..."'
        ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml
        ExecStart=/bin/sh -c 'echo "waiting 60s..."'
        ExecStart=/bin/sleep 1m
        ExecStart=/bin/sh -c 'echo "apply calico-ippools..."'
        ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml
        #ExecStart=/bin/sh -c 'echo "waiting for whisker..."'
        #ExecStart=/opt/bin/kubectl wait deployment -n calico-system whisker --for condition=Available=True --timeout=1200s
        #ExecStart=/bin/sh -c 'echo "port-forward -n calico-system service/whisker 8081:8081"'
        #ExecStart=/opt/bin/kubectl port-forward -n calico-system service/whisker 8081:8081
        ExecStart=/usr/bin/systemctl disable install-calico.service
        #RemainAfterExit=true
        Restart=on-failure
        RestartSec=120s

        [Install]
        WantedBy=multi-user.target

    # One-shot ArgoCD install; disables itself when done.
    - name: install-argocd.service
      enabled: true
      contents: |
        [Unit]
        Description=Install ArgoCD
        Wants=install-calico.service
        After=install-calico.service

        [Service]
        Type=oneshot
        StandardOutput=journal+console
        StandardError=journal+console
        Environment=KUBECONFIG=/etc/kubernetes/admin.conf
        Environment=DATASTORE_TYPE=kubernetes
        Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin
        ExecStart=/opt/bin/kubectl wait deployment -n kube-system coredns --for condition=Available=True --timeout=600s
        ExecStart=/bin/sleep 1m
        ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml
        ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/install.yaml
        ExecStart=/opt/bin/kubectl wait deployment -n argocd argocd-server --for condition=Available=True --timeout=600s
        #ExecStart=/bin/sleep 10s
        #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-bootstrap.yaml
        #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-apps.yaml
        #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/argocd.yaml
        #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/calico.yaml
        #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/rook-ceph.yaml
        #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/gitea.yaml
        #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/argocd/argocd-secret.yaml
        ##ExecStart=/bin/sleep 10m
        #ExecStart=/opt/bin/kubectl wait deployment -n gitea gitea --for condition=Available=True --timeout=4800s
        #ExecStart=/bin/sleep 10m
        #ExecStart=/opt/bin/kubectl apply -n argocd -f http://gitea.gitea.svc.k8aux.undercloud.cf:3000/undercloud/k8aux-apps/raw/branch/main/app-of-apps/app-of-apps.yaml
        ExecStart=/usr/bin/systemctl disable install-argocd.service
        Restart=on-failure
        RestartSec=120s

        [Install]
        WantedBy=multi-user.target

    # Re-create key Services with fixed dual-stack ClusterIPs after the
    # installers above have run (ClusterIP is immutable, hence delete+apply).
    - name: pin-service-ips.service
      enabled: true
      contents: |
        [Unit]
        Description=Pin fixed dual-stack ClusterIPs for kube-dns, argocd-server and whisker
        After=install-argocd.service install-calico.service kubeadm-init.service network-online.target
        Wants=install-argocd.service install-calico.service kubeadm-init.service network-online.target

        [Service]
        Type=oneshot
        StandardOutput=journal+console
        StandardError=journal+console
        Environment=KUBECONFIG=/etc/kubernetes/admin.conf
        Environment=PATH=/usr/bin:/usr/sbin:/opt/bin
        ExecStart=/bin/sh -eu -c '\
          echo "[pin-service-ips] waiting for API..." ; \
          for i in $(seq 1 120); do kubectl get --raw=/readyz >/dev/null 2>&1 && break; sleep 2; done ; \
          echo "[pin-service-ips] ensure namespaces exist..." ; \
          kubectl get ns kube-system >/dev/null ; \
          kubectl get ns argocd >/dev/null 2>&1 || kubectl create ns argocd ; \
          kubectl get ns calico-system >/dev/null ; \
          echo "[pin-service-ips] wait for coredns/argocd readiness (best effort)..." ; \
          kubectl -n kube-system wait deploy coredns --for=condition=Available=True --timeout=300s || true ; \
          kubectl -n argocd wait deploy argocd-server --for=condition=Available=True --timeout=600s || true ; \
          echo "[pin-service-ips] replace Services with fixed ClusterIPs..." ; \
          kubectl -n kube-system delete svc kube-dns --ignore-not-found ; \
          kubectl apply -f /etc/kubernetes/addons/kube-dns-fixed-svc.yaml ; \
          kubectl -n argocd delete svc argocd-server --ignore-not-found ; \
          kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/service.yaml ; \
          kubectl -n calico-system delete svc whisker --ignore-not-found || true ; \
          kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/whisker.yaml || true ; \
          echo "[pin-service-ips] done." \
          '

        [Install]
        WantedBy=multi-user.target