diff --git a/terraform.tfstate b/terraform.tfstate new file mode 100644 index 0000000..68a6a96 --- /dev/null +++ b/terraform.tfstate @@ -0,0 +1,9 @@ +{ + "version": 4, + "terraform_version": "1.12.2", + "serial": 1, + "lineage": "0fe30c18-6249-7171-dfc3-b7765913d1a0", + "outputs": {}, + "resources": [], + "check_results": null +} diff --git a/terraform/terraform.tfstate b/terraform/terraform.tfstate index dcf08e7..7a8f845 100644 --- a/terraform/terraform.tfstate +++ b/terraform/terraform.tfstate @@ -1,9 +1,1495 @@ { "version": 4, "terraform_version": "1.12.2", - "serial": 1873, - "lineage": "751616a2-db32-0edf-7258-3ba00b4868bd", + "serial": 260, + "lineage": "d92c42be-29f9-bad9-ef9a-3dc952ff5fa5", "outputs": {}, - "resources": [], + "resources": [ + { + "mode": "data", + "type": "ct_config", + "name": "control_plane1_ignition", + "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 755\n - path: /etc/kubernetes/manifests\n #overwrite: true\n mode: 0755\n - path: /etc/install-calico\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n control-plane1\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::91/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:1::1\n Address=10.0.2.91/24\n Gateway=10.0.2.3\n DNS=10.0.1.1\n Domains=undercloud.local\n IPv6AcceptRA=no\n IPv6PrivacyExtensions=no\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes control plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /opt/bin/velero\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/velero\"\n\n - path: /etc/kubernetes/kubeadm-init.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: InitConfiguration\n bootstrapTokens:\n - token: \"kvg1hc.t3rewovrps426rof\"\n description: \"default kubeadm bootstrap token\"\n ttl: \"0\"\n nodeRegistration:\n name: control-plane1\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::91\"\n cluster-dns: \"10.0.91.53,fd00:0:0:f:1::53\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n localAPIEndpoint:\n advertiseAddress: \"fd00:0:0:2::91\"\n bindPort: 6443\n certificateKey: \"fee7c3e5cfcac7e4774c6efca0464a42d897f30f7300340d6578b5cfb4a3d34b\"\n ---\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: ClusterConfiguration\n controlPlaneEndpoint: \"[fd00:0:0:2::100]:6443\"\n networking:\n podSubnet: \"fd00:0:0:a::/64,10.0.10.0/24\"\n serviceSubnet: \"fd00:0:0:f:1::/108,10.0.91.0/24\"\n dnsDomain: \"k8s.undercloud.local\"\n controllerManager:\n extraArgs:\n flex-volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n bind-address: '::'\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8s.undercloud.local\"\n volumePluginDir: /opt/libexec/kubernetes/kubelet-plugins/volume/exec\n cgroupDriver: \"systemd\"\n - path: /etc/kubernetes/addons/kube-dns-fixed-svc.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: v1\n kind: Service\n metadata:\n name: kube-dns\n namespace: kube-system\n 
labels:\n k8s-app: kube-dns\n spec:\n type: ClusterIP\n ipFamilyPolicy: RequireDualStack\n ipFamilies: [IPv6, IPv4]\n clusterIP: fd00:0:0:f:1::53\n clusterIPs:\n - fd00:0:0:f:1::53\n - 10.0.91.53\n ports:\n - name: dns\n port: 53\n protocol: UDP\n targetPort: 53\n - name: dns-tcp\n port: 53\n protocol: TCP\n targetPort: 53\n - name: metrics\n port: 9153\n protocol: TCP\n targetPort: 9153\n selector:\n k8s-app: kube-dns\n\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n\n ExecStart=/bin/sh -c 'echo \"setting timezone to Europe/Berlin\"'\n StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=multi-user.target\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-init.service\n enabled: true\n contents: |\n [Unit]\n Description=Kubeadm Init Cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n ConditionPathExists=!/etc/kubernetes/kubelet.conf\n\n [Service]\n Type=oneshot\n StandardOutput=journal+console\n StandardError=journal+console\n\n ExecStart=/bin/sh -c 'echo \"kubeadm-init.service started...\"'\n\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n ExecStart=/bin/sh -c 'echo \"running kubeadm init...\"'\n ExecStart=/opt/bin/kubeadm init 
--upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\n \n # copy files for kubectl\n ExecStart=/bin/sh -c 'echo \"copying files (admin.conf) to core home folder.\"'\n ExecStartPost=/usr/bin/mkdir -p /home/core/.kube\n ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\n ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\n \n ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n [Install]\n WantedBy=multi-user.target\n - name: install-calico.service\n enabled: true\n contents: |\n [Unit]\n Wants=kubeadm-init.service\n After=kubeadm-init.service\n\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n\n ExecStart=/bin/sh -c 'echo \"install-calico.service started...\"'\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin\n Type=oneshot\n StandardOutput=journal+console\n StandardError=journal+console\n ExecStart=/bin/sh -c 'echo \"waiting 30s...\"'\n ExecStart=/bin/sleep 30s\n ExecStart=/bin/sh -c 'echo \"create calico namespace...\"'\n ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\n ExecStart=/bin/sh -c 'echo \"install tigera operator...\"'\n ExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml\n ExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\n ExecStart=/bin/sh -c 'echo \"waiting 60s...\"'\n ExecStart=/bin/sleep 60s\n ExecStart=/bin/sh -c 'echo \"waiting for tigera operator... (20min max)\"'\n ExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\n ExecStart=/bin/sh -c 'echo \"create calico custom resources...\"'\n ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml\n \n ExecStart=/bin/sh -c 'echo \"waiting 3m...\"'\n ExecStart=/bin/sleep 3m\n #ExecStart=/bin/sh -c 'echo \"apply calico (calico-apiserver)...\"'\n #ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\n #ExecStart=/bin/sh -c 'echo \"waiting 1m...\"'\n #ExecStart=/bin/sleep 2m\n ExecStart=/bin/sh -c 'echo \"waiting calico-apiserver... 
(20min max)\"'\n ExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\n ExecStart=/bin/sh -c 'echo \"waiting 120s...\"'\n ExecStart=/bin/sleep 2m\n ExecStart=/bin/sh -c 'echo \"apply calico-peers...\"'\n ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\n ExecStart=/bin/sh -c 'echo \"waiting 60s...\"'\n ExecStart=/bin/sleep 1m\n ExecStart=/bin/sh -c 'echo \"apply calico-ippools...\"'\n ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\n \n #ExecStart=/bin/sh -c 'echo \"waiting for whisker...\"'\n #ExecStart=/opt/bin/kubectl wait deployment -n calico-system whisker --for condition=Available=True --timeout=1200s\n #ExecStart=/bin/sh -c 'echo \"port-forward -n calico-system service/whisker 8081:8081\"'\n #ExecStart=/opt/bin/kubectl port-forward -n calico-system service/whisker 8081:8081\n \n ExecStart=/usr/bin/systemctl disable install-calico.service\n #RemainAfterExit=true\n Restart=on-failure\n RestartSec=120s\n [Install]\n WantedBy=multi-user.target\n - name: install-argocd.service\n enabled: true\n contents: |\n [Unit]\n Wants=install-calico.service\n After=install-calico.service\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin\n Type=oneshot\n\n ExecStart=/opt/bin/kubectl wait deployment -n kube-system coredns --for condition=Available=True --timeout=600s\n \n ExecStart=/bin/sleep 1m\n ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml\n ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/install.yaml\n ExecStart=/opt/bin/kubectl wait deployment -n argocd argocd-server --for condition=Available=True --timeout=600s\n\n ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/repo.yaml\n ExecStart=/bin/sleep 10s\n ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/apps.yaml\n\n #ExecStart=/bin/sleep 10s\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-bootstrap.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-apps.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/argocd.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/calico.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/rook-ceph.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/gitea.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f 
http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/argocd/argocd-secret.yaml\n ##ExecStart=/bin/sleep 10m\n #ExecStart=/opt/bin/kubectl wait deployment -n gitea gitea --for condition=Available=True --timeout=4800s\n #ExecStart=/bin/sleep 10m\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://gitea.gitea.svc.k8aux.undercloud.cf:3000/undercloud/k8aux-apps/raw/branch/main/app-of-apps/app-of-apps.yaml\n \n ExecStart=/usr/bin/systemctl disable install-argocd.service\n Restart=on-failure\n RestartSec=120s\n [Install]\n WantedBy=multi-user.target\n - name: pin-service-ips.service\n enabled: true\n contents: |\n [Unit]\n Description=Pin fixed dual-stack ClusterIPs for kube-dns, argocd-server and whisker\n After=install-argocd.service install-calico.service kubeadm-init.service network-online.target\n Wants=install-argocd.service install-calico.service kubeadm-init.service network-online.target\n\n [Service]\n Type=oneshot\n StandardOutput=journal+console\n StandardError=journal+console\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=PATH=/usr/bin:/usr/sbin:/opt/bin\n ExecStart=/bin/sh -eu -c '\\\n echo \"[pin-service-ips] waiting for API...\" ; \\\n for i in $(seq 1 120); do kubectl get --raw=/readyz \u003e/dev/null 2\u003e\u00261 \u0026\u0026 break; sleep 2; done ; \\\n echo \"[pin-service-ips] ensure namespaces exist...\" ; \\\n kubectl get ns kube-system \u003e/dev/null ; \\\n kubectl get ns argocd \u003e/dev/null 2\u003e\u00261 || kubectl create ns argocd ; \\\n kubectl get ns calico-system \u003e/dev/null ; \\\n echo \"[pin-service-ips] wait for coredns/argocd readiness (best effort)...\" ; \\\n kubectl -n kube-system wait deploy coredns --for=condition=Available=True --timeout=300s || true ; \\\n kubectl -n argocd wait deploy argocd-server --for=condition=Available=True --timeout=600s || true ; \\\n echo \"[pin-service-ips] replace Services with fixed ClusterIPs...\" ; \\\n kubectl -n kube-system delete svc kube-dns --ignore-not-found ; \\\n kubectl apply -f /etc/kubernetes/addons/kube-dns-fixed-svc.yaml ; \\\n kubectl -n argocd delete svc argocd-server --ignore-not-found ; \\\n kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/service.yaml ; \\\n kubectl -n calico-system delete svc whisker --ignore-not-found || true ; \\\n kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/whisker.yaml || true ; \\\n echo \"[pin-service-ips] done.\" \\\n '\n\n [Install]\n WantedBy=multi-user.target\n\n", + "id": "3958027050", + "pretty_print": true, + "rendered": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 755\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/manifests\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/install-calico\",\n 
\"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane1%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/1SMwQuCMBSH7+9P6TA3C8HBDoIRHRKpo3gY2wsl3WKbmv99aJHEO/2+j+9VFxlUU0MhexQYmh1AVWCYrHvUkGnt0Htx15Ty5WLOUxYlBzjJgJOc/8we8uK2EcY5+31glFASk5RF8RZ/4adbByMMctvL1ngxGI1OdXbQpLNKdnAuxyRTCp/hmglj1126dpRqPr4CGt9a4xfxDgAA//+AaSdh1AAAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": 
\"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/velero\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/velero\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-init.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yTwWvjOBTG7/orhC691JYdK3GqW7btoZSFsF32ssxBlp5dYUUykuym89cPspNMwxRmGAaDEfh7T9/3fs9i0P+BD9pZjvuxAaEOeb8NuXZ0KhuIokK9torjJ6vjvbOt7kYvonYWNc7FEL0Y/nU92MARxhmO6cwx6aeufJV5rDy8uckPga023rUEYYyxgiC9HuJ8K1HQitHE8/X40ndptpTEaDgmBUHWKfgHOp0UcwOEsRUH4Fg6G70z2WCEhRJhLL1+cbKHyPFo9ZFTSv1oadIJbcGrD8c8ONkjPJswEB+P0Yud7+ZUGKdLMz1wTFpVFDw9K87vysWbNGOI4DNlA8ekLPIivyvzdXV7Ebe85HxdLfLJmfEA2WDGTttMac8xoW6I1OgGjiBp8uAtRAj0ZOckDnSppbOMIOOkMLv906NVg9M2JrNCTeCjDrBTykMIn3putFV75yPHG8YqJFNFq6WI8AzvqQKglhWsZSuFrIHVNZMbaKUo2IYJtlLbu7qtirauiqJihdqs622zlm3DRKUq1hCUZRn65d26XwZ4vV4nnPtE8xIQk/8/pimL4gtPEQiyEN+c77Xt0hQGp17Gxib03+MLzumG3c6A0ouuWBpGAD9pCT/qZ2q0LLa3Z6bnEmXDgzsIPW/6NuSjVeClcaPKZybk7N6A/1tY0YFPruB6q1oDx+zPbUNqmcBm4kz+hvObT0kYiLmcp30NpDwBeV4010AubQnnBL2CMPH161/aqt3Vh9Pf8JP5LNb3c5CHFPo3MiPZeTcOD15PkMYW3kOEgyLoWwAAAP//IhFBidUEAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/addons/kube-dns-fixed-svc.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4yRz0r8MBDH73mKeYBfS8tPxZ3rLkJvwVUv4mE2nZXQtInJtNC3l7hdraC4ye3758MkQ8E+cUzWDwhTrTo7tAh7jpM1rHoWakkIFcBAPSN044GLdkiLkAKZs5rmJNwrAEcHdil3ALrbVFAIq2IKbLInc2CErRuTcGy0ArDhjnrrZu2dNTPCPb+NNvJuJLcXMt1XxHJCeG70dPMPGj1dvSgAcyYhHNuqwnyPWCNe/1+7y1zFT6Es11VZlZu6/BCCj/JZOH3A6e35ZBNhKQKE6MUb7xAed3rRhOIri14HV5xCTPiL9bC9gNWzRGu+z7WpL6Yt0cSOjfj42+LeAwAA//9TMAMrKgIAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\n\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"setting timezone to Europe/Berlin\\\"'\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n 
\"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Kubeadm Init Cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\nConditionPathExists=!/etc/kubernetes/kubelet.conf\\n\\n[Service]\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"kubeadm-init.service started...\\\"'\\n\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"running kubeadm init...\\\"'\\nExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\\n\\n# copy files for kubectl\\nExecStart=/bin/sh -c 'echo \\\"copying files (admin.conf) to core home folder.\\\"'\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\nExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-init.service\"\n },\n {\n \"contents\": \"[Unit]\\nWants=kubeadm-init.service\\nAfter=kubeadm-init.service\\n\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"install.calico.service started...\\\"'\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\nExecStart=/bin/sh -c 'echo \\\"witing 30s...\\\"'\\nExecStart=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"create calico namespace...\\\"'\\nExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\\nExecStart=/bin/sh -c 'echo \\\"install tigera operator...\\\"'\\nExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml\\nExecStart=-/opt/bin/kubectl create -f 
http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\\nExecStart=/bin/sh -c 'echo \\\"witing 60s...\\\"'\\nExecStart=/bin/sleep 60s\\nExecStart=/bin/sh -c 'echo \\\"witing for tigera operator... (20mini max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"create clico custom ressources...\\\"'\\nExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml\\n\\nExecStart=/bin/sh -c 'echo \\\"witing 3m..\\\"'\\nExecStart=/bin/sleep 3m\\n#ExecStart=/bin/sh -c 'echo \\\"apply calico (calico-apiserver)...\\\"'\\n#ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\\n#ExecStart=/bin/sh -c 'echo \\\"witing 1m...\\\"'\\n#ExecStart=/bin/sleep 2m\\nExecStart=/bin/sh -c 'echo \\\"witing calico-apiserver... (20mini max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"witing 120s...\\\"'\\nExecStart=/bin/sleep 2m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-peers...\\\"'\\nExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\\nExecStart=/bin/sh -c 'echo \\\"witing 60s...\\\"'\\nExecStart=/bin/sleep 1m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-ippools...\\\"'\\nExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\\n\\n#ExecStart=/bin/sh -c 'echo \\\"witing for whisker..\\\"'\\n#ExecStart=/opt/bin/kubectl wait deployment -n calico-system whisker --for condition=Available=True --timeout=1200s\\n#ExecStart=/bin/sh -c 'echo \\\"port-forward -n calico-system service/whisker 8081:8081\\\"'\\n#ExecStart=/opt/bin/kubectl port-forward -n calico-system service/whisker 8081:8081\\n\\nExecStart=/usr/bin/systemctl disable install-calico.service\\n#RemainAfterExit=true\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"install-calico.service\"\n },\n {\n \"contents\": \"[Unit]\\nWants=install-calico.service\\nAfter=install-calico.service\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\n\\nExecStart=/opt/bin/kubectl wait deployment -n kube-system coredns --for condition=Available=True --timeout=600s\\n\\nExecStart=/bin/sleep 1m\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/install.yaml\\nExecStart=/opt/bin/kubectl wait deployment -n argocd argocd-server --for condition=Available=True --timeout=600s\\n\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/repo.yaml\\nExecStart=/bin/sleep 
10s\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/apps.yaml\\n\\n#ExecStart=/bin/sleep 10s\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-bootstrap.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-apps.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/argocd.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/calico.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/rook-ceph.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/gitea.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/argocd/argocd-secret.yaml\\n##ExecStart=/bin/sleep 10m\\n#ExecStart=/opt/bin/kubectl wait deployment -n gitea gitea --for condition=Available=True --timeout=4800s\\n#ExecStart=/bin/sleep 10m\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://gitea.gitea.svc.k8aux.undercloud.cf:3000/undercloud/k8aux-apps/raw/branch/main/app-of-apps/app-of-apps.yaml\\n\\nExecStart=/usr/bin/systemctl disable install-argocd.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"install-argocd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Pin fixed dual-stack ClusterIPs for kube-dns, argocd-server and whisker\\nAfter=install-argocd.service install-calico.service kubeadm-init.service network-online.target\\nWants=install-argocd.service install-calico.service kubeadm-init.service network-online.target\\n\\n[Service]\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=PATH=/usr/bin:/usr/sbin:/opt/bin\\nExecStart=/bin/sh -eu -c '\\\\\\n echo \\\"[pin-service-ips] waiting for API...\\\" ; \\\\\\n for i in $(seq 1 120); do kubectl get --raw=/readyz \\u003e/dev/null 2\\u003e\\u00261 \\u0026\\u0026 break; sleep 2; done ; \\\\\\n echo \\\"[pin-service-ips] ensure namespaces exist...\\\" ; \\\\\\n kubectl get ns kube-system \\u003e/dev/null ; \\\\\\n kubectl get ns argocd \\u003e/dev/null 2\\u003e\\u00261 || kubectl create ns argocd ; \\\\\\n kubectl get ns calico-system \\u003e/dev/null ; \\\\\\n echo \\\"[pin-service-ips] wait for coredns/argocd readiness (best effort)...\\\" ; \\\\\\n kubectl -n kube-system wait deploy coredns --for=condition=Available=True --timeout=300s || true ; \\\\\\n kubectl -n argocd wait deploy argocd-server --for=condition=Available=True --timeout=600s || true ; \\\\\\n echo \\\"[pin-service-ips] replace Services with fixed ClusterIPs...\\\" ; \\\\\\n kubectl -n kube-system delete svc kube-dns --ignore-not-found ; \\\\\\n kubectl apply -f /etc/kubernetes/addons/kube-dns-fixed-svc.yaml ; \\\\\\n kubectl -n argocd delete svc argocd-server --ignore-not-found ; \\\\\\n kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/service.yaml ; \\\\\\n 
kubectl -n calico-system delete svc whisker --ignore-not-found || true ; \\\\\\n kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/whisker.yaml || true ; \\\\\\n echo \\\"[pin-service-ips] done.\\\" \\\\\\n'\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"pin-service-ips.service\"\n }\n ]\n }\n}", + "snippets": null, + "strict": false + }, + "sensitive_attributes": [], + "identity_schema_version": 0 + } + ] + }, + { + "mode": "data", + "type": "ct_config", + "name": "control_plane2_ignition", + "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n control-plane2\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::92/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:1::1\n Address=10.0.2.92/24\n Gateway=10.0.2.3\n DNS=10.0.1.1\n Domains=undercloud.local\n IPv6AcceptRA=no\n IPv6PrivacyExtensions=no\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes control plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8s.undercloud.local/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n controlPlane:\n localAPIEndpoint:\n advertiseAddress: \"fd00:0:0:2::92\"\n bindPort: 6443\n certificateKey: \"fee7c3e5cfcac7e4774c6efca0464a42d897f30f7300340d6578b5cfb4a3d34b\"\n nodeRegistration:\n name: control-plane2\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::92\"\n cluster-dns: \"10.0.91.53,fd00:0:0:f:1::53\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8s.undercloud.local\"\n clusterDNS:\n - \"fd00:0:0:f:1::53\"\n - \"10.0.91.53\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n 
Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=multi-user.target\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n\n # copy files for kubectl\n ExecStartPost=/usr/bin/mkdir -p /home/core/.kube\n ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\n ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n", + "id": "2638982903", + "pretty_print": true, + "rendered": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane2%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/1SMQYuDMBBG7/NT9hCT7CIYyEFwKT1UpD2Kh5BMUapJSaLWf1+0pVLm9L3Hm/qkom4bKNWAEmP7A1CXGGfnbw3kxngMQV4NpWI9LkTGk/QPDirirJYv8wtFedkJE4J9PjBKKOEk4wnf4zd8ddtghEHhBtXZIEdr0OvejYb0TqsejtWU5lrjPZ5zad22K99NSi//j4g2dM6GVTwDAAD//2Xz5MPUAAAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwWobQQyG7/sUOjoBj68lN5MGWmgTaJJDe5M1ylp4drRImoXt0xd7c+jBNYX4v8xISB/fMLcfT3cL/8j29eXL04+vvx4+w/b+/uH5GZ4ev/08M3iB8d85MV724uCzBw8gDiNagL4BgjM14wxS3ww9rFE043SOsS0FkEImCWEHNIZBq4Qe97FmKNr3nNMlj9eKLfZq8vu4Q8TuoAaDeHM+mnmYUJQZRtO97CTe2X8xBpzB2FsJkApZnGQsUtHmxYJ7XES1pvMeV/jbbn3ldIe2Y6sc7EBaw7QwjAUrw6Nm7rrvWLFnmATvTrMUBVbvl5uOsAgprJbz1Jq4sCmsYYd0aCOslsZNh9YrZdhHjH632Szl2tkmtrRUySdKh0+eWs1sVLTlVJSwbK7/8D8BAAD//2cEMuhrAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6SST4+bMBDF73wKi3MB8ych61u6u4d2pSpqqr1UPRh7yI4gNhrbdLefvjJE26RSTxUXBG80783vyQmfgRxaI9gQOpD6nA87l6Mt5rIDL+tkQKMF+2zR3FvT4ymQ9GhNoqzxZMfDKA2IhLHRKjnuD58ejZ4sGh+/MSb1DOTRwV5rAucES3vNuYhPJcRdlS6yDo0+WPKCbZumThhTcapHJT08wVucAmhVDRvVK6laaNq2UVvoleTNtpFNpXd3bV/zvq05rxuut5t2121U3zWy1nXTpYmxGr7CCZ1fE0SDRp5BsEuUbIpZqrid8GjVAF6wYPBVFEVBwRRRJ9EA6avX3Fk1JGw53wj+8dWT3NPJrfnj0gynf8RWY3AeKNMmHqbkOc/vynxTf3gX96IUYlOv8tmO4QzZNIYTmkwjCZYWdvLFiB28giqiBzLgwRUXOxexK9bZYpGliUan7Az0Fl121vp4lOmbHcBcuE14BJqB3nGy9Pt1hJLzHyLCWq35ZZSlw3wqX1Tua4KfdqbJNdWWbL+KgnGyh+OA0/3+GWjlu3TPU4Aky7Lk70KO4HO19O62l+Wll0+r5raa8r1rQqTJC8jRv/z6iEbvb35crv9gzxIX7zuXB6OB1GiDzpdC/1F9OcbLZCytOC9F03LRVv0NoOwaYZqoE9kwPRDOEEG5N+fhrFOWrCgOC5iH/4D4OwAA//8ZRx0evQMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# 
Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n# copy files for kubectl\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", + "snippets": null, + "strict": false + }, + "sensitive_attributes": [], + "identity_schema_version": 0 + } + ] + }, + { + "mode": "data", + "type": "ct_config", + "name": "control_plane3_ignition", + "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n control-plane3\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::93/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:1::1\n Address=10.0.2.93/24\n Gateway=10.0.2.3\n DNS=10.0.1.1\n Domains=undercloud.local\n IPv6AcceptRA=no\n IPv6PrivacyExtensions=no\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes control plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n controlPlane:\n localAPIEndpoint:\n advertiseAddress: \"fd00:0:0:2::93\"\n bindPort: 6443\n certificateKey: \"fee7c3e5cfcac7e4774c6efca0464a42d897f30f7300340d6578b5cfb4a3d34b\"\n nodeRegistration:\n name: control-plane3\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::93\"\n cluster-dns: \"10.0.91.53,fd00:0:0:f:1::53\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8s.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::53\"\n - \"10.0.91.53\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n 
Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=multi-user.target\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n\n # copy files for kubectl\n ExecStartPost=/usr/bin/mkdir -p /home/core/.kube\n ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\n ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n",
+          "id": "3713463550",
+          "pretty_print": true,
+          "rendered": "{\n  \"ignition\": {\n    \"config\": {\n      \"replace\": {\n        \"verification\": {}\n      }\n    },\n    \"proxy\": {},\n    \"security\": {\n      \"tls\": {}\n    },\n    \"timeouts\": {},\n    \"version\": \"3.4.0\"\n  },\n  \"kernelArguments\": {},\n  \"passwd\": {\n    \"users\": [\n      {\n        \"name\": \"core\",\n        \"sshAuthorizedKeys\": [\n          \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n        ]\n      }\n    ]\n  },\n  \"storage\": {\n    \"directories\": [\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/bin\",\n        \"user\": {},\n        \"mode\": 493\n      },\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/cni/bin\",\n        \"user\": {},\n        \"mode\": 493\n      }\n    ],\n    \"files\": [\n      {\n        \"group\": {},\n        \"path\": \"/etc/hostname\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"\",\n          \"source\": \"data:,control-plane3%0A\",\n          \"verification\": {}\n        },\n        \"mode\": 420\n      },\n      {\n        \"group\": {},\n        \"path\": \"/etc/systemd/network/00-eth.network\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"gzip\",\n          
\"source\": \"data:;base64,H4sIAAAAAAAC/1SMscqDMBRG9/so/xAT/REMZBAspUNF2lEcQnKLUk1KErW+fdGWSrnTdw7n1mcZVNtAKQcUGNo/gLrEMFt3byDX2qH34qYp5evFnGdJlP7DUQac5fJjEijK604Y5+z7gVFCSUyyJIr3+APf3TYYYVDYQXbGi9FodKq3oya9VbKHUzWluVL4CJdcGLvtynWTVMvhGdD4zhq/ilcAAAD//8Z6WqLUAAAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6SSQY+bMBCF7/wKi3MBE0jI+pbu7qFdqYqaai9VD8YesiOIjcY23e2vrwxom1TqqeKC4I3mvfmeHPEZyKE1gvWhBakveb93OdpiKlvwskp6NFqwzxbNvTUdngNJj9YkyhpPdjgO0oBIGBusksPh+OnR6NGi8fEbY1JPQB4dHLQmcE6wtNOci/hshLir0lnWotFHS16wXV1XCWMqTnWopIcneItTAI2qYKs6JVUDddPUagedkrze1bLe6P1d01W8ayrOq5rr3bbZt1vVtbWsdFW3aWKshq9wRueXBNGgkRcQbI2SjTHLvJ3wZFUPXrBg8FUURUHBFFEn0QDpq9fcWdUnbD7fAP7x1ZM80Nkt+ePSDMd/xFZDcB4o0yYepuQ5z+/KfFt9eBd3ohRiu8onO4QLZOMQzmgyjSRYWtjRFwO28AqqiB7IgAdXrHZWsSuW2WKWpYlGp+wE9BZdttb6eJTxm+3BrNxGPAFNQO84Wfr9OkLJ+Q8RYS3W/DzK0n46ly8q9xXBTzvR6OrNjmy3iIJxsoNTj+P94Rlo4Tt3z1OAJMuy5O9CDuBzNffutpfl2sunRXNbTfneNSHS5AXk4F9+fUSjDzc/1us/2IvE2fve5cFoIDXYoPO50H9UX07xMhlLN5yXom64aDbdDaDsGmGaqDPZMD4QThBBuTfn4aJTliwojjOYh/+A+DsAAP//txdoCb0DAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# 
Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n# copy files for kubectl\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", + "snippets": null, + "strict": false + }, + "sensitive_attributes": [], + "identity_schema_version": 0 + } + ] + }, + { + "mode": "data", + "type": "ct_config", + "name": "worker1_ignition", + "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n worker1\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::101/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:1::1\n Address=10.0.2.101/24\n Gateway=10.0.2.3\n DNS=10.0.1.1\n Domains=undercloud.local\n IPv6AcceptRA=no\n IPv6PrivacyExtensions=no\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes worker Node\n\n don't manage\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n nodeRegistration:\n name: worker1\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::101\"\n cluster-dns: \"10.0.91.53,fd00:0:0:f:1::53\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8s.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::53\"\n - \"10.0.91.53\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n 
StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=multi-user.target\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n",
+          "id": "1066535137",
+          "pretty_print": true,
+          "rendered": "{\n  \"ignition\": {\n    \"config\": {\n      \"replace\": {\n        \"verification\": {}\n      }\n    },\n    \"proxy\": {},\n    \"security\": {\n      \"tls\": {}\n    },\n    \"timeouts\": {},\n    \"version\": \"3.4.0\"\n  },\n  \"kernelArguments\": {},\n  \"passwd\": {\n    \"users\": [\n      {\n        \"name\": \"core\",\n        \"sshAuthorizedKeys\": [\n          \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n        ]\n      }\n    ]\n  },\n  \"storage\": {\n    \"directories\": [\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/bin\",\n        \"user\": {},\n        \"mode\": 493\n      },\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/cni/bin\",\n        \"user\": {},\n        \"mode\": 493\n      }\n    ],\n    \"files\": [\n      {\n        \"group\": {},\n        \"path\": \"/etc/hostname\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"\",\n          \"source\": \"data:,worker1%0A\",\n          \"verification\": {}\n        },\n        \"mode\": 420\n      },\n      {\n        \"group\": {},\n        \"path\": \"/etc/systemd/network/00-eth.network\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"gzip\",\n          \"source\": \"data:;base64,H4sIAAAAAAAC/1SMMeuDMBBH9/so/yH/xBaHQAbBUjpUpB3F4UiuKNWkJFHrty/aUik3/d7jXXXGqJsaCuxJUWz+AKqC4uT8vYbMGE8hqJvhXC6XSCm4+E/3cMRIE84/agd5cd2IkFJ8XwjOOEvYUidb/aHvcB2CCchdj60NarCGvO7cYFjnNHZwKsc005oe8ZIp69Zd+nZEPR+ekWxonQ2LeAUAAP//NFvnsNYAAAA=\",\n          \"verification\": {}\n        },\n        \"mode\": 420\n      },\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/etc/hosts\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"gzip\",\n          \"source\": 
\"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwUoDMRCG7/sU/7lg36HUgoK0YNuD3qbJdDs0m5SZibI+vaz14GEtgvud83/5YGb/p5nhFxb73cPm+fF1dY/FcrnabrFZP72MPLzh+DNfjt1JDNabcwcxXEgd5QiCcajKEZKPSuZag1fl+ZhjkRIouLyJCxtIGV3J4mXYU45IpW05zm917DNVPxWVj2ETApuhKDqxajyUmasETz0uWk5yEP92/3B01EPZanJIRhQLckmSSftrBbd0DS15Pt4xwW2bu4lpzvXAmtnZ8F70zIp1idw0sWRHR5lanv7TzwAAAP//EeTc++cCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRT4+bPBDG7/4Uls8vGJLsn9e3dLOXVqqqptpL1YNjD2QE8aCxzWb76SsgapdeKy6IeQb//PzsgC/AESkY2eUTWH8pu8dYIumxPkGyW9Fh8EZ+JAxPFBpsM9uEFEQgD1+hxZiWD0ZIGewFjHwl7oBrIaVjPJLrIBmZA16N1ppz0I5CshiA/bvXMpLrhJwxekjP18R2z22c/ivldFqBg5Gq8VVlpmdjTF3Vah67PscEXPgQjVR1VVbl/3V5t/3vd7oxtTF32yU+Up8vUAx9bjEUHtlIpWlIuscTXMHpCYIDJIj6xnMLR73s6jmmhMfoaAR+mzBPRGmqY/hGHYQF3A54BB6Bn4MfCEMyUn1f36H6Ye53uxtamlel6sa2PrsybRleaeQh7jb3TM0SyiHaBo4dDk/7F2Bs0C0OZOIMoigK8bfZHlLpZoFrwfVN8Kcls3ZsvWeIU6XGKHEG26fzzw8Y/H41uLV/oIvFmf0xljl4YNdT9mVPzvZ/Up+PUzOFVJuqqs3uoTIPm2YlqHivUAnXMuXhwDjCJCq+xQQXr6RYVHyZxRz+QeKvAAAA//+Az5ICBgMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n 
\"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", + "snippets": null, + "strict": false + }, + "sensitive_attributes": [], + "identity_schema_version": 0 + } + ] + }, + { + "mode": "data", + "type": "ct_config", + "name": "worker2_ignition", + "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n worker2\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::102/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:1::1\n Address=10.0.2.102/24\n Gateway=10.0.2.3\n DNS=10.0.1.1\n Domains=undercloud.local\n IPv6AcceptRA=no\n IPv6PrivacyExtensions=no\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes worker Node\n\n don't manage\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n nodeRegistration:\n name: worker2\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::102\"\n cluster-dns: \"10.0.91.53,fd00:0:0:f:1::53\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8s.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::53\"\n - \"10.0.91.53\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n 
StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=multi-user.target\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n",
+          "id": "2434431782",
+          "pretty_print": true,
+          "rendered": "{\n  \"ignition\": {\n    \"config\": {\n      \"replace\": {\n        \"verification\": {}\n      }\n    },\n    \"proxy\": {},\n    \"security\": {\n      \"tls\": {}\n    },\n    \"timeouts\": {},\n    \"version\": \"3.4.0\"\n  },\n  \"kernelArguments\": {},\n  \"passwd\": {\n    \"users\": [\n      {\n        \"name\": \"core\",\n        \"sshAuthorizedKeys\": [\n          \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n        ]\n      }\n    ]\n  },\n  \"storage\": {\n    \"directories\": [\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/bin\",\n        \"user\": {},\n        \"mode\": 493\n      },\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/cni/bin\",\n        \"user\": {},\n        \"mode\": 493\n      }\n    ],\n    \"files\": [\n      {\n        \"group\": {},\n        \"path\": \"/etc/hostname\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"\",\n          \"source\": \"data:,worker2%0A\",\n          \"verification\": {}\n        },\n        \"mode\": 420\n      },\n      {\n        \"group\": {},\n        \"path\": \"/etc/systemd/network/00-eth.network\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"gzip\",\n          \"source\": \"data:;base64,H4sIAAAAAAAC/1SMQYuDMBBG7/NT9pBNsouHQA6Cy9JDRdqjeBiSKUo1KUnU+u+LtlTKnL73eFMfMZm2gRIH0pTaL4C6pDT7cG0gtzZQjPpiOVfrSaUEl9/ZL/xjohmXD/UDRXneiVBKvF8IzjiTbK3lXr/oM9yGYAIKP2Dnoh6dpWB6P1rWe4M9HKopy42hWzrl2vltV6Gb0Cx/90Qudt7FVTwCAAD//880QSjWAAAA\",\n          \"verification\": {}\n        },\n        \"mode\": 420\n      },\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/etc/hosts\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"gzip\",\n          \"source\": 
\"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwUoDMRCG7/sU/7lg36HUgoK0YNuD3qbJdDs0m5SZibI+vaz14GEtgvud83/5YGb/p5nhFxb73cPm+fF1dY/FcrnabrFZP72MPLzh+DNfjt1JDNabcwcxXEgd5QiCcajKEZKPSuZag1fl+ZhjkRIouLyJCxtIGV3J4mXYU45IpW05zm917DNVPxWVj2ETApuhKDqxajyUmasETz0uWk5yEP92/3B01EPZanJIRhQLckmSSftrBbd0DS15Pt4xwW2bu4lpzvXAmtnZ8F70zIp1idw0sWRHR5lanv7TzwAAAP//EeTc++cCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRT4+bPBDG7/4Uls8vGEj2z+tbutlLK1VVU+2l6sGxBzKCeNDYZrP99BUQtZteKy6IeQb//PzsiC/AESkY2ecjWH8u+8dYIumpPkKyG9Fj8EZ+JAxPFFrsMtuEFEQgD1+hw5jWD0ZIGewZjHwl7oEbIaVjPJDrIRmZA16M1ppz0I5CshiA/bvXMpLrhVwwBkjPl8R2x12c/yvlfFqBo5Gq9VVl5qcxpq4atYzdkGMCLnyIRqq6Kqvy/7q82/z3O92a2pi7zRqfaMhnKMYhdxgKj2yk0jQmPeARLuD0DMEBEkR95bmGo1539RJTwmN0NAG/zZhHojTXMX6jHsIKbkc8AE/Az8GPhCEZqb7f3qH6Ye632ytaWlal6qeuPrkybRheaeIxbpt7pnYN5RBtC4cex6fdCzC26FYHMnEGURSF+NvsAKl0i8BbwfVV8Kc1c+vYes8Q50qNUeIEdkinnx8w+N3N4Nr+ns4WF/bHWObggd1A2ZcDOTv8SX0+zM0UUjVVVZvtQ2UemvZGUPFeoRKuY8rjnnGCWVR8iwnOXkmxqviyiNn/g8RfAQAA//9dswqFBgMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n 
\"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", + "snippets": null, + "strict": false + }, + "sensitive_attributes": [], + "identity_schema_version": 0 + } + ] + }, + { + "mode": "data", + "type": "ct_config", + "name": "worker3_ignition", + "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n worker3\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::103/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:1::1\n Address=10.0.2.103/24\n Gateway=10.0.2.3\n DNS=10.0.1.1\n Domains=undercloud.local\n IPv6AcceptRA=no\n IPv6PrivacyExtensions=no\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes worker Node\n\n don't manage\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n nodeRegistration:\n name: worker3\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::103\"\n cluster-dns: \"10.0.91.53,fd00:0:0:f:1::53\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8s.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::53\"\n - \"10.0.91.53\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n 
StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=multi-user.target\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n",
+          "id": "105441902",
+          "pretty_print": true,
+          "rendered": "{\n  \"ignition\": {\n    \"config\": {\n      \"replace\": {\n        \"verification\": {}\n      }\n    },\n    \"proxy\": {},\n    \"security\": {\n      \"tls\": {}\n    },\n    \"timeouts\": {},\n    \"version\": \"3.4.0\"\n  },\n  \"kernelArguments\": {},\n  \"passwd\": {\n    \"users\": [\n      {\n        \"name\": \"core\",\n        \"sshAuthorizedKeys\": [\n          \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n        ]\n      }\n    ]\n  },\n  \"storage\": {\n    \"directories\": [\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/bin\",\n        \"user\": {},\n        \"mode\": 493\n      },\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/cni/bin\",\n        \"user\": {},\n        \"mode\": 493\n      }\n    ],\n    \"files\": [\n      {\n        \"group\": {},\n        \"path\": \"/etc/hostname\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"\",\n          \"source\": \"data:,worker3%0A\",\n          \"verification\": {}\n        },\n        \"mode\": 420\n      },\n      {\n        \"group\": {},\n        \"path\": \"/etc/systemd/network/00-eth.network\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"gzip\",\n          \"source\": \"data:;base64,H4sIAAAAAAAC/1SMQYuDMBBG7/NT9pBNdPEQyEFwKT1UpD2KhyGZolSTkkSt/75oS6XM6XuPN/UJo24bKHEgRbH9AahLirPztwZyYzyFoK6Gc7leIqXg6W/2BweMNOPypVIoystOhJTi80JwxlnC1jrZ6zd9hdsQTEDhBuxsUKM15HXvRsN6p7GHYzVludZ0j+dcWbftyncT6uX/EcmGztmwimcAAAD//2bu3F/WAAAA\",\n          \"verification\": {}\n        },\n        \"mode\": 420\n      },\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/etc/hosts\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"gzip\",\n          \"source\": 
\"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwUoDMRCG7/sU/7lg36HUgoK0YNuD3qbJdDs0m5SZibI+vaz14GEtgvud83/5YGb/p5nhFxb73cPm+fF1dY/FcrnabrFZP72MPLzh+DNfjt1JDNabcwcxXEgd5QiCcajKEZKPSuZag1fl+ZhjkRIouLyJCxtIGV3J4mXYU45IpW05zm917DNVPxWVj2ETApuhKDqxajyUmasETz0uWk5yEP92/3B01EPZanJIRhQLckmSSftrBbd0DS15Pt4xwW2bu4lpzvXAmtnZ8F70zIp1idw0sWRHR5lanv7TzwAAAP//EeTc++cCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRu47bOhCGez4FwfpIlGzv5bBz1tskQBDEwTZBCpocyQPJHGFIar15+kAXJOu0gRpB84/48f/sgC/AESkY2eUTWH8pu8dYIumxPkGyW9Fh8EZ+JAxPFBpsM9uEFEQgD1+hxZiWD0ZIGewFjHwl7oC3QkrHeCTXQTIyB7warTXnoB2FZDEA+3evZSTXCTlj9JCer4ntnts4/VfK6bQCByNV46vKTM/GmLraqnns+hwTcOFDNFLVVVmV/9fl3fa/3+nG1MbcrfGR+nyBYuhzi6HwyEYqTUPSPZ7gCk5PEBwgQdQrzxqOetnVc0wJj9HRCPw2YZ6I0lTH8I06CAu4HfAIPAI/Bz8QhmSk+n57h+qHud/tVrQ0r0rVjW19dmXaMrzSyEPcbe6ZmiWUQ7QNHDscnvYvwNigWxzIxBlEURTib7M9pNLNAm8F16vgT0vm1rH1niFOlRqjxBlsn84/P2Dw+5vB2v6BLhZn9sdY5uCBXU/Zlz052/9JfT5OzRRSbaqqNruHyjxsmhtBxXuFSriWKQ8HxhEmUfEtJrh4JcWi4sss5vAPEn8FAAD//xZnffgGAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n 
\"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", + "snippets": null, + "strict": false + }, + "sensitive_attributes": [], + "identity_schema_version": 0 + } + ] + }, + { + "mode": "managed", + "type": "null_resource", + "name": "wait_for_cp1", + "provider": "provider[\"registry.terraform.io/hashicorp/null\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "4911326915232263828", + "triggers": null + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "dependencies": [ + 
"data.ct_config.control_plane1_ignition", + "proxmox_virtual_environment_download_file.flatcar_image", + "proxmox_virtual_environment_file.control_plane1_ignition", + "proxmox_virtual_environment_vm.control_plane1", + "proxmox_virtual_environment_vm.flatcar_template" + ] + } + ] + }, + { + "mode": "managed", + "type": "null_resource", + "name": "wait_for_cp3", + "provider": "provider[\"registry.terraform.io/hashicorp/null\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "846646365650442434", + "triggers": null + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "dependencies": [ + "data.ct_config.control_plane1_ignition", + "data.ct_config.control_plane3_ignition", + "null_resource.wait_for_cp1", + "proxmox_virtual_environment_download_file.flatcar_image", + "proxmox_virtual_environment_file.control_plane1_ignition", + "proxmox_virtual_environment_file.control_plane3_ignition", + "proxmox_virtual_environment_vm.control_plane1", + "proxmox_virtual_environment_vm.control_plane3", + "proxmox_virtual_environment_vm.flatcar_template" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_download_file", + "name": "flatcar_image", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "checksum": null, + "checksum_algorithm": null, + "content_type": "import", + "datastore_id": "cephfs", + "decompression_algorithm": null, + "file_name": "flatcar_production_proxmoxve_image.qcow2", + "id": "cephfs:import/flatcar_production_proxmoxve_image.qcow2", + "node_name": "hyper1", + "overwrite": true, + "overwrite_unmanaged": false, + "size": 573243392, + "upload_timeout": 600, + "url": "http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/iso/flatcar_production_proxmoxve_image.img", + "verify": true + }, + "sensitive_attributes": [], + "identity_schema_version": 0 + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_file", + "name": "control_plane1_ignition", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content_type": "snippets", + "datastore_id": "cephfs", + "file_mode": null, + "file_modification_date": null, + "file_name": "control-plane1-ignition-user-data", + "file_size": null, + "file_tag": null, + "id": "cephfs:snippets/control-plane1-ignition-user-data", + "node_name": "hyper1", + "overwrite": true, + "source_file": [], + "source_raw": [ + { + "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 755\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/manifests\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/install-calico\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": 
{},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane1%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/1SMwQuCMBSH7+9P6TA3C8HBDoIRHRKpo3gY2wsl3WKbmv99aJHEO/2+j+9VFxlUU0MhexQYmh1AVWCYrHvUkGnt0Htx15Ty5WLOUxYlBzjJgJOc/8we8uK2EcY5+31glFASk5RF8RZ/4adbByMMctvL1ngxGI1OdXbQpLNKdnAuxyRTCp/hmglj1126dpRqPr4CGt9a4xfxDgAA//+AaSdh1AAAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n 
\"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/velero\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/velero\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-init.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yTwWvjOBTG7/orhC691JYdK3GqW7btoZSFsF32ssxBlp5dYUUykuym89cPspNMwxRmGAaDEfh7T9/3fs9i0P+BD9pZjvuxAaEOeb8NuXZ0KhuIokK9torjJ6vjvbOt7kYvonYWNc7FEL0Y/nU92MARxhmO6cwx6aeufJV5rDy8uckPga023rUEYYyxgiC9HuJ8K1HQitHE8/X40ndptpTEaDgmBUHWKfgHOp0UcwOEsRUH4Fg6G70z2WCEhRJhLL1+cbKHyPFo9ZFTSv1oadIJbcGrD8c8ONkjPJswEB+P0Yud7+ZUGKdLMz1wTFpVFDw9K87vysWbNGOI4DNlA8ekLPIivyvzdXV7Ebe85HxdLfLJmfEA2WDGTttMac8xoW6I1OgGjiBp8uAtRAj0ZOckDnSppbOMIOOkMLv906NVg9M2JrNCTeCjDrBTykMIn3putFV75yPHG8YqJFNFq6WI8AzvqQKglhWsZSuFrIHVNZMbaKUo2IYJtlLbu7qtirauiqJihdqs622zlm3DRKUq1hCUZRn65d26XwZ4vV4nnPtE8xIQk/8/pimL4gtPEQiyEN+c77Xt0hQGp17Gxib03+MLzumG3c6A0ouuWBpGAD9pCT/qZ2q0LLa3Z6bnEmXDgzsIPW/6NuSjVeClcaPKZybk7N6A/1tY0YFPruB6q1oDx+zPbUNqmcBm4kz+hvObT0kYiLmcp30NpDwBeV4010AubQnnBL2CMPH161/aqt3Vh9Pf8JP5LNb3c5CHFPo3MiPZeTcOD15PkMYW3kOEgyLoWwAAAP//IhFBidUEAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/addons/kube-dns-fixed-svc.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4yRz0r8MBDH73mKeYBfS8tPxZ3rLkJvwVUv4mE2nZXQtInJtNC3l7hdraC4ye3758MkQ8E+cUzWDwhTrTo7tAh7jpM1rHoWakkIFcBAPSN044GLdkiLkAKZs5rmJNwrAEcHdil3ALrbVFAIq2IKbLInc2CErRuTcGy0ArDhjnrrZu2dNTPCPb+NNvJuJLcXMt1XxHJCeG70dPMPGj1dvSgAcyYhHNuqwnyPWCNe/1+7y1zFT6Es11VZlZu6/BCCj/JZOH3A6e35ZBNhKQKE6MUb7xAed3rRhOIri14HV5xCTPiL9bC9gNWzRGu+z7WpL6Yt0cSOjfj42+LeAwAA//9TMAMrKgIAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\n\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"setting timezone to Europe/Berlin\\\"'\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node 
Agent\\nDocumentation=https://kubernetes.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"kubelet.service\"\n      },\n      {\n        \"contents\": \"[Unit]\\nDescription=Kubeadm Init Cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\nConditionPathExists=!/etc/kubernetes/kubelet.conf\\n\\n[Service]\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"kubeadm-init.service started...\\\"'\\n\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"running kubeadm init...\\\"'\\nExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\\n\\n# copy files for kubectl\\nExecStart=/bin/sh -c 'echo \\\"copying files (admin.conf) to core home folder.\\\"'\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\nExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"kubeadm-init.service\"\n      },\n      {\n        \"contents\": \"[Unit]\\nWants=kubeadm-init.service\\nAfter=kubeadm-init.service\\n\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"install-calico.service started...\\\"'\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\nExecStart=/bin/sh -c 'echo \\\"waiting 30s...\\\"'\\nExecStart=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"create calico namespace...\\\"'\\nExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\\nExecStart=/bin/sh -c 'echo \\\"install tigera operator...\\\"'\\nExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml\\nExecStart=-/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\\nExecStart=/bin/sh -c 'echo \\\"waiting 60s...\\\"'\\nExecStart=/bin/sleep 60s\\nExecStart=/bin/sh -c 'echo \\\"waiting
for tigera operator... (20min max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"create calico custom resources...\\\"'\\nExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml\\n\\nExecStart=/bin/sh -c 'echo \\\"waiting 3m..\\\"'\\nExecStart=/bin/sleep 3m\\n#ExecStart=/bin/sh -c 'echo \\\"apply calico (calico-apiserver)...\\\"'\\n#ExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\\n#ExecStart=/bin/sh -c 'echo \\\"waiting 1m...\\\"'\\n#ExecStart=/bin/sleep 2m\\nExecStart=/bin/sh -c 'echo \\\"waiting for calico-apiserver... (20min max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"waiting 120s...\\\"'\\nExecStart=/bin/sleep 2m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-peers...\\\"'\\nExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\\nExecStart=/bin/sh -c 'echo \\\"waiting 60s...\\\"'\\nExecStart=/bin/sleep 1m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-ippools...\\\"'\\nExecStart=-/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\\n\\n#ExecStart=/bin/sh -c 'echo \\\"waiting for whisker..\\\"'\\n#ExecStart=/opt/bin/kubectl wait deployment -n calico-system whisker --for condition=Available=True --timeout=1200s\\n#ExecStart=/bin/sh -c 'echo \\\"port-forward -n calico-system service/whisker 8081:8081\\\"'\\n#ExecStart=/opt/bin/kubectl port-forward -n calico-system service/whisker 8081:8081\\n\\nExecStart=/usr/bin/systemctl disable install-calico.service\\n#RemainAfterExit=true\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"install-calico.service\"\n      },\n      {\n        \"contents\": \"[Unit]\\nWants=install-calico.service\\nAfter=install-calico.service\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\n\\nExecStart=/opt/bin/kubectl wait deployment -n kube-system coredns --for condition=Available=True --timeout=600s\\n\\nExecStart=/bin/sleep 1m\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/install.yaml\\nExecStart=/opt/bin/kubectl wait deployment -n argocd argocd-server --for condition=Available=True --timeout=600s\\n\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/repo.yaml\\nExecStart=/bin/sleep 10s\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/apps.yaml\\n\\n#ExecStart=/bin/sleep 10s\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f
http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-bootstrap.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-apps.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/argocd.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/calico.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/rook-ceph.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/gitea.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/argocd/argocd-secret.yaml\\n##ExecStart=/bin/sleep 10m\\n#ExecStart=/opt/bin/kubectl wait deployment -n gitea gitea --for condition=Available=True --timeout=4800s\\n#ExecStart=/bin/sleep 10m\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://gitea.gitea.svc.k8aux.undercloud.cf:3000/undercloud/k8aux-apps/raw/branch/main/app-of-apps/app-of-apps.yaml\\n\\nExecStart=/usr/bin/systemctl disable install-argocd.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"install-argocd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Pin fixed dual-stack ClusterIPs for kube-dns, argocd-server and whisker\\nAfter=install-argocd.service install-calico.service kubeadm-init.service network-online.target\\nWants=install-argocd.service install-calico.service kubeadm-init.service network-online.target\\n\\n[Service]\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=PATH=/usr/bin:/usr/sbin:/opt/bin\\nExecStart=/bin/sh -eu -c '\\\\\\n echo \\\"[pin-service-ips] waiting for API...\\\" ; \\\\\\n for i in $(seq 1 120); do kubectl get --raw=/readyz \\u003e/dev/null 2\\u003e\\u00261 \\u0026\\u0026 break; sleep 2; done ; \\\\\\n echo \\\"[pin-service-ips] ensure namespaces exist...\\\" ; \\\\\\n kubectl get ns kube-system \\u003e/dev/null ; \\\\\\n kubectl get ns argocd \\u003e/dev/null 2\\u003e\\u00261 || kubectl create ns argocd ; \\\\\\n kubectl get ns calico-system \\u003e/dev/null ; \\\\\\n echo \\\"[pin-service-ips] wait for coredns/argocd readiness (best effort)...\\\" ; \\\\\\n kubectl -n kube-system wait deploy coredns --for=condition=Available=True --timeout=300s || true ; \\\\\\n kubectl -n argocd wait deploy argocd-server --for=condition=Available=True --timeout=600s || true ; \\\\\\n echo \\\"[pin-service-ips] replace Services with fixed ClusterIPs...\\\" ; \\\\\\n kubectl -n kube-system delete svc kube-dns --ignore-not-found ; \\\\\\n kubectl apply -f /etc/kubernetes/addons/kube-dns-fixed-svc.yaml ; \\\\\\n kubectl -n argocd delete svc argocd-server --ignore-not-found ; \\\\\\n kubectl apply -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/argocd/service.yaml ; \\\\\\n kubectl -n calico-system delete svc whisker --ignore-not-found || true ; \\\\\\n kubectl create -f http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/calico-config/whisker.yaml || true ; \\\\\\n echo 
\\\"[pin-service-ips] done.\\\" \\\\\\n'\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"pin-service-ips.service\"\n }\n ]\n }\n}", + "file_name": "control-plane1-ignition-user-data", + "resize": 0 + } + ], + "timeout_upload": 1800 + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.control_plane1_ignition" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_file", + "name": "control_plane2_ignition", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content_type": "snippets", + "datastore_id": "cephfs", + "file_mode": null, + "file_modification_date": null, + "file_name": "control-plane2-ignition-user-data", + "file_size": null, + "file_tag": null, + "id": "cephfs:snippets/control-plane2-ignition-user-data", + "node_name": "hyper1", + "overwrite": true, + "source_file": [], + "source_raw": [ + { + "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane2%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/1SMQYuDMBBG7/NT9hCT7CIYyEFwKT1UpD2Kh5BMUapJSaLWf1+0pVLm9L3Hm/qkom4bKNWAEmP7A1CXGGfnbw3kxngMQV4NpWI9LkTGk/QPDirirJYv8wtFedkJE4J9PjBKKOEk4wnf4zd8ddtghEHhBtXZIEdr0OvejYb0TqsejtWU5lrjPZ5zad22K99NSi//j4g2dM6GVTwDAAD//2Xz5MPUAAAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwWobQQyG7/sUOjoBj68lN5MGWmgTaJJDe5M1ylp4drRImoXt0xd7c+jBNYX4v8xISB/fMLcfT3cL/8j29eXL04+vvx4+w/b+/uH5GZ4ev/08M3iB8d85MV724uCzBw8gDiNagL4BgjM14wxS3ww9rFE043SOsS0FkEImCWEHNIZBq4Qe97FmKNr3nNMlj9eKLfZq8vu4Q8TuoAaDeHM+mnmYUJQZRtO97CTe2X8xBpzB2FsJkApZnGQsUtHmxYJ7XES1pvMeV/jbbn3ldIe2Y6sc7EBaw7QwjAUrw6Nm7rrvWLFnmATvTrMUBVbvl5uOsAgprJbz1Jq4sCmsYYd0aCOslsZNh9YrZdhHjH632Szl2tkmtrRUySdKh0+eWs1sVLTlVJSwbK7/8D8BAAD//2cEMuhrAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n 
\"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SST4+bMBDF73wKi3MB8ych61u6u4d2pSpqqr1UPRh7yI4gNhrbdLefvjJE26RSTxUXBG80783vyQmfgRxaI9gQOpD6nA87l6Mt5rIDL+tkQKMF+2zR3FvT4ymQ9GhNoqzxZMfDKA2IhLHRKjnuD58ejZ4sGh+/MSb1DOTRwV5rAucES3vNuYhPJcRdlS6yDo0+WPKCbZumThhTcapHJT08wVucAmhVDRvVK6laaNq2UVvoleTNtpFNpXd3bV/zvq05rxuut5t2121U3zWy1nXTpYmxGr7CCZ1fE0SDRp5BsEuUbIpZqrid8GjVAF6wYPBVFEVBwRRRJ9EA6avX3Fk1JGw53wj+8dWT3NPJrfnj0gynf8RWY3AeKNMmHqbkOc/vynxTf3gX96IUYlOv8tmO4QzZNIYTmkwjCZYWdvLFiB28giqiBzLgwRUXOxexK9bZYpGliUan7Az0Fl121vp4lOmbHcBcuE14BJqB3nGy9Pt1hJLzHyLCWq35ZZSlw3wqX1Tua4KfdqbJNdWWbL+KgnGyh+OA0/3+GWjlu3TPU4Aky7Lk70KO4HO19O62l+Wll0+r5raa8r1rQqTJC8jRv/z6iEbvb35crv9gzxIX7zuXB6OB1GiDzpdC/1F9OcbLZCytOC9F03LRVv0NoOwaYZqoE9kwPRDOEEG5N+fhrFOWrCgOC5iH/4D4OwAA//8ZRx0evQMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target 
modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"containerd.service\"\n      },\n      {\n        \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"set-timezone.service\"\n      },\n      {\n        \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernetes.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"kubelet.service\"\n      },\n      {\n        \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n# copy files for kubectl\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"kubeadm-join.service\"\n      }\n    ]\n  }\n}",
+              "file_name": "control-plane2-ignition-user-data",
+              "resize": 0
+            }
+          ],
+          "timeout_upload": 1800
+        },
+        "sensitive_attributes": [],
+        "identity_schema_version": 0,
+        "private": "bnVsbA==",
+        "dependencies": [
+          "data.ct_config.control_plane2_ignition"
+        ]
+      }
+    ]
+  },
+  {
+    "mode": "managed",
+    "type": "proxmox_virtual_environment_file",
+    "name": "control_plane3_ignition",
+    "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]",
+    "instances": [
+      {
+        "schema_version": 0,
+        "attributes": {
+          "content_type": "snippets",
+          "datastore_id": "cephfs",
+          "file_mode": null,
+          "file_modification_date": null,
+          "file_name": "control-plane3-ignition-user-data",
+          "file_size": null,
+          "file_tag": null,
+          "id": "cephfs:snippets/control-plane3-ignition-user-data",
+          "node_name": "hyper1",
+          "overwrite": true,
+          "source_file":
[], + "source_raw": [ + { + "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane3%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/1SMscqDMBRG9/so/xAT/REMZBAspUNF2lEcQnKLUk1KErW+fdGWSrnTdw7n1mcZVNtAKQcUGNo/gLrEMFt3byDX2qH34qYp5evFnGdJlP7DUQac5fJjEijK604Y5+z7gVFCSUyyJIr3+APf3TYYYVDYQXbGi9FodKq3oya9VbKHUzWluVL4CJdcGLvtynWTVMvhGdD4zhq/ilcAAAD//8Z6WqLUAAAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n 
{\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SSQY+bMBCF7/wKi3MBE0jI+pbu7qFdqYqaai9VD8YesiOIjcY23e2vrwxom1TqqeKC4I3mvfmeHPEZyKE1gvWhBakveb93OdpiKlvwskp6NFqwzxbNvTUdngNJj9YkyhpPdjgO0oBIGBusksPh+OnR6NGi8fEbY1JPQB4dHLQmcE6wtNOci/hshLir0lnWotFHS16wXV1XCWMqTnWopIcneItTAI2qYKs6JVUDddPUagedkrze1bLe6P1d01W8ayrOq5rr3bbZt1vVtbWsdFW3aWKshq9wRueXBNGgkRcQbI2SjTHLvJ3wZFUPXrBg8FUURUHBFFEn0QDpq9fcWdUnbD7fAP7x1ZM80Nkt+ePSDMd/xFZDcB4o0yYepuQ5z+/KfFt9eBd3ohRiu8onO4QLZOMQzmgyjSRYWtjRFwO28AqqiB7IgAdXrHZWsSuW2WKWpYlGp+wE9BZdttb6eJTxm+3BrNxGPAFNQO84Wfr9OkLJ+Q8RYS3W/DzK0n46ly8q9xXBTzvR6OrNjmy3iIJxsoNTj+P94Rlo4Tt3z1OAJMuy5O9CDuBzNffutpfl2sunRXNbTfneNSHS5AXk4F9+fUSjDzc/1us/2IvE2fve5cFoIDXYoPO50H9UX07xMhlLN5yXom64aDbdDaDsGmGaqDPZMD4QThBBuTfn4aJTliwojjOYh/+A+DsAAP//txdoCb0DAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf 
--kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n# copy files for kubectl\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", + "file_name": "control-plane3-ignition-user-data", + "resize": 0 + } + ], + "timeout_upload": 1800 + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.control_plane3_ignition" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_file", + "name": "worker1_ignition", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content_type": "snippets", + "datastore_id": "cephfs", + "file_mode": null, + "file_modification_date": null, + "file_name": "worker1-ignition-user-data", + "file_size": null, + "file_tag": null, + "id": "cephfs:snippets/worker1-ignition-user-data", + "node_name": "hyper1", + "overwrite": true, + "source_file": [], + "source_raw": [ + { + "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker1%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/1SMMeuDMBBH9/so/yH/xBaHQAbBUjpUpB3F4UiuKNWkJFHrty/aUik3/d7jXXXGqJsaCuxJUWz+AKqC4uT8vYbMGE8hqJvhXC6XSCm4+E/3cMRIE84/agd5cd2IkFJ8XwjOOEvYUidb/aHvcB2CCchdj60NarCGvO7cYFjnNHZwKsc005oe8ZIp69Zd+nZEPR+ekWxonQ2LeAUAAP//NFvnsNYAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwUoDMRCG7/sU/7lg36HUgoK0YNuD3qbJdDs0m5SZibI+vaz14GEtgvud83/5YGb/p5nhFxb73cPm+fF1dY/FcrnabrFZP72MPLzh+DNfjt1JDNabcwcxXEgd5QiCcajKEZKPSuZag1fl+ZhjkRIouLyJCxtIGV3J4mXYU45IpW05zm917DNVPxWVj2ETApuhKDqxajyUmasETz0uWk5yEP92/3B01EPZanJIRhQLckmSSftrBbd0DS15Pt4xwW2bu4lpzvXAmtnZ8F70zIp1idw0sWRHR5lanv7TzwAAAP//EeTc++cCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6SRT4+bPBDG7/4Uls8vGJLsn9e3dLOXVqqqptpL1YNjD2QE8aCxzWb76SsgapdeKy6IeQb//PzsgC/AESkY2eUTWH8pu8dYIumxPkGyW9Fh8EZ+JAxPFBpsM9uEFEQgD1+hxZiWD0ZIGewFjHwl7oBrIaVjPJLrIBmZA16N1ppz0I5CshiA/bvXMpLrhJwxekjP18R2z22c/ivldFqBg5Gq8VVlpmdjTF3Vah67PscEXPgQjVR1VVbl/3V5t/3vd7oxtTF32yU+Up8vUAx9bjEUHtlIpWlIuscTXMHpCYIDJIj6xnMLR73s6jmmhMfoaAR+mzBPRGmqY/hGHYQF3A54BB6Bn4MfCEMyUn1f36H6Ye53uxtamlel6sa2PrsybRleaeQh7jb3TM0SyiHaBo4dDk/7F2Bs0C0OZOIMoigK8bfZHlLpZoFrwfVN8Kcls3ZsvWeIU6XGKHEG26fzzw8Y/H41uLV/oIvFmf0xljl4YNdT9mVPzvZ/Up+PUzOFVJuqqs3uoTIPm2YlqHivUAnXMuXhwDjCJCq+xQQXr6RYVHyZxRz+QeKvAAAA//+Az5ICBgMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl 
disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", + "file_name": "worker1-ignition-user-data", + "resize": 0 + } + ], + "timeout_upload": 1800 + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.worker1_ignition" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_file", + "name": "worker2_ignition", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content_type": "snippets", + "datastore_id": "cephfs", + "file_mode": null, + "file_modification_date": null, + "file_name": "worker2-ignition-user-data", + "file_size": null, + "file_tag": null, + "id": "cephfs:snippets/worker2-ignition-user-data", + "node_name": "hyper1", + "overwrite": true, + "source_file": [], + "source_raw": [ + { + "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker2%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/1SMQYuDMBBG7/NT9pBNsouHQA6Cy9JDRdqjeBiSKUo1KUnU+u+LtlTKnL73eFMfMZm2gRIH0pTaL4C6pDT7cG0gtzZQjPpiOVfrSaUEl9/ZL/xjohmXD/UDRXneiVBKvF8IzjiTbK3lXr/oM9yGYAIKP2Dnoh6dpWB6P1rWe4M9HKopy42hWzrl2vltV6Gb0Cx/90Qudt7FVTwCAAD//880QSjWAAAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwUoDMRCG7/sU/7lg36HUgoK0YNuD3qbJdDs0m5SZibI+vaz14GEtgvud83/5YGb/p5nhFxb73cPm+fF1dY/FcrnabrFZP72MPLzh+DNfjt1JDNabcwcxXEgd5QiCcajKEZKPSuZag1fl+ZhjkRIouLyJCxtIGV3J4mXYU45IpW05zm917DNVPxWVj2ETApuhKDqxajyUmasETz0uWk5yEP92/3B01EPZanJIRhQLckmSSftrBbd0DS15Pt4xwW2bu4lpzvXAmtnZ8F70zIp1idw0sWRHR5lanv7TzwAAAP//EeTc++cCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRT4+bPBDG7/4Uls8vGEj2z+tbutlLK1VVU+2l6sGxBzKCeNDYZrP99BUQtZteKy6IeQb//PzsiC/AESkY2ecjWH8u+8dYIumpPkKyG9Fj8EZ+JAxPFFrsMtuEFEQgD1+hw5jWD0ZIGewZjHwl7oEbIaVjPJDrIRmZA16M1ppz0I5CshiA/bvXMpLrhVwwBkjPl8R2x12c/yvlfFqBo5Gq9VVl5qcxpq4atYzdkGMCLnyIRqq6Kqvy/7q82/z3O92a2pi7zRqfaMhnKMYhdxgKj2yk0jQmPeARLuD0DMEBEkR95bmGo1539RJTwmN0NAG/zZhHojTXMX6jHsIKbkc8AE/Az8GPhCEZqb7f3qH6Ye632ytaWlal6qeuPrkybRheaeIxbpt7pnYN5RBtC4cex6fdCzC26FYHMnEGURSF+NvsAKl0i8BbwfVV8Kc1c+vYes8Q50qNUeIEdkinnx8w+N3N4Nr+ns4WF/bHWObggd1A2ZcDOTv8SX0+zM0UUjVVVZvtQ2UemvZGUPFeoRKuY8rjnnGCWVR8iwnOXkmxqviyiNn/g8RfAQAA//9dswqFBgMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set 
Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n            \"enabled\": true,\n            \"name\": \"set-timezone.service\"\n          },\n          {\n            \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernetes.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n            \"enabled\": true,\n            \"name\": \"kubelet.service\"\n          },\n          {\n            \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n            \"enabled\": true,\n            \"name\": \"kubeadm-join.service\"\n          }\n        ]\n      }\n}",
+              "file_name": "worker2-ignition-user-data",
+              "resize": 0
+            }
+          ],
+          "timeout_upload": 1800
+        },
+        "sensitive_attributes": [],
+        "identity_schema_version": 0,
+        "private": "bnVsbA==",
+        "dependencies": [
+          "data.ct_config.worker2_ignition"
+        ]
+      }
+    ]
+  },
+  {
+    "mode": "managed",
+    "type": "proxmox_virtual_environment_file",
+    "name": "worker3_ignition",
+    "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]",
+    "instances": [
+      {
+        "schema_version": 0,
+        "attributes": {
+          "content_type": "snippets",
+          "datastore_id": "cephfs",
+          "file_mode": null,
+          "file_modification_date": null,
+          "file_name": "worker3-ignition-user-data",
+          "file_size": null,
+          "file_tag": null,
+          "id": "cephfs:snippets/worker3-ignition-user-data",
+          "node_name": "hyper1",
+          "overwrite": true,
+          "source_file": [],
+          "source_raw": [
+            {
+              "data": "{\n  \"ignition\": {\n    \"config\": {\n      \"replace\": {\n        \"verification\": {}\n      }\n    },\n    \"proxy\": {},\n    \"security\": {\n      \"tls\": {}\n    },\n    \"timeouts\": {},\n    \"version\": \"3.4.0\"\n  },\n  \"kernelArguments\": {},\n  \"passwd\": {\n    \"users\": [\n      {\n        \"name\": \"core\",\n        \"sshAuthorizedKeys\": [\n          \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n        ]\n      }\n    ]\n  },\n  \"storage\": {\n    \"directories\": [\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/bin\",\n        
\"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker3%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/1SMQYuDMBBG7/NT9pBNdPEQyEFwKT1UpD2KhyGZolSTkkSt/75oS6XM6XuPN/UJo24bKHEgRbH9AahLirPztwZyYzyFoK6Gc7leIqXg6W/2BweMNOPypVIoystOhJTi80JwxlnC1jrZ6zd9hdsQTEDhBuxsUKM15HXvRsN6p7GHYzVludZ0j+dcWbftyncT6uX/EcmGztmwimcAAAD//2bu3F/WAAAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwUoDMRCG7/sU/7lg36HUgoK0YNuD3qbJdDs0m5SZibI+vaz14GEtgvud83/5YGb/p5nhFxb73cPm+fF1dY/FcrnabrFZP72MPLzh+DNfjt1JDNabcwcxXEgd5QiCcajKEZKPSuZag1fl+ZhjkRIouLyJCxtIGV3J4mXYU45IpW05zm917DNVPxWVj2ETApuhKDqxajyUmasETz0uWk5yEP92/3B01EPZanJIRhQLckmSSftrBbd0DS15Pt4xwW2bu4lpzvXAmtnZ8F70zIp1idw0sWRHR5lanv7TzwAAAP//EeTc++cCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": 
\"http://build-node.undercloud.local:3000/Undercloud/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRu47bOhCGez4FwfpIlGzv5bBz1tskQBDEwTZBCpocyQPJHGFIar15+kAXJOu0gRpB84/48f/sgC/AESkY2eUTWH8pu8dYIumxPkGyW9Fh8EZ+JAxPFBpsM9uEFEQgD1+hxZiWD0ZIGewFjHwl7oC3QkrHeCTXQTIyB7warTXnoB2FZDEA+3evZSTXCTlj9JCer4ntnts4/VfK6bQCByNV46vKTM/GmLraqnns+hwTcOFDNFLVVVmV/9fl3fa/3+nG1MbcrfGR+nyBYuhzi6HwyEYqTUPSPZ7gCk5PEBwgQdQrzxqOetnVc0wJj9HRCPw2YZ6I0lTH8I06CAu4HfAIPAI/Bz8QhmSk+n57h+qHud/tVrQ0r0rVjW19dmXaMrzSyEPcbe6ZmiWUQ7QNHDscnvYvwNigWxzIxBlEURTib7M9pNLNAm8F16vgT0vm1rH1niFOlRqjxBlsn84/P2Dw+5vB2v6BLhZn9sdY5uCBXU/Zlz052/9JfT5OzRRSbaqqNruHyjxsmhtBxXuFSriWKQ8HxhEmUfEtJrh4JcWi4sss5vAPEn8FAAD//xZnffgGAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# 
Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", + "file_name": "worker3-ignition-user-data", + "resize": 0 + } + ], + "timeout_upload": 1800 + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.worker3_ignition" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_vm", + "name": "control_plane1", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "acpi": true, + "agent": [], + "amd_sev": [], + "audio_device": [], + "bios": "seabios", + "boot_order": null, + "cdrom": [], + "clone": [ + { + "datastore_id": "", + "full": true, + "node_name": "hyper1", + "retries": 1, + "vm_id": 103 + } + ], + "cpu": [ + { + "affinity": "", + "architecture": "", + "cores": 2, + "flags": null, + "hotplugged": 0, + "limit": 0, + "numa": false, + "sockets": 1, + "type": "host", + "units": 1024 + } + ], + "description": "kubernetes control-plane1", + "disk": [], + "efi_disk": [], + "hook_script_file_id": null, + "hostpci": [], + "id": "104", + "initialization": [ + { + "datastore_id": "local-lvm", + "dns": [], + "interface": "ide2", + "ip_config": [], + "meta_data_file_id": "", + "network_data_file_id": "", + "type": "", + "user_account": [], + "user_data_file_id": "cephfs:snippets/control-plane1-ignition-user-data", + "vendor_data_file_id": "" + } + ], + "ipv4_addresses": [], + "ipv6_addresses": [], + "keyboard_layout": "en-us", + "kvm_arguments": null, + "mac_addresses": [ + "BC:24:11:90:E8:8F" + ], + "machine": null, + "memory": [ + { + "dedicated": 3072, + "floating": 3072, + "hugepages": "", + "keep_hugepages": false, + "shared": 0 + } + ], + "migrate": false, + "name": "control-plane1", + "network_device": [ + { + "bridge": "vmbr0", + "disconnected": false, + "enabled": true, + "firewall": false, + "mac_address": "BC:24:11:90:E8:8F", + "model": "virtio", + "mtu": 0, + "queues": 0, + "rate_limit": 0, + "trunks": "", + "vlan_id": 0 + } + ], + "network_interface_names": [], + "node_name": "hyper1", + "numa": [], + "on_boot": true, + "operating_system": [], + "pool_id": null, + "protection": false, + "reboot": false, + "reboot_after_update": true, + "rng": [], + "scsi_hardware": "virtio-scsi-pci", + "serial_device": [], + "smbios": [], + "started": true, + "startup": [], + "stop_on_destroy": false, + "tablet_device": true, + "tags": [ + "control-plane", + "flatcar", + "kubernetes", + "terraform" + ], + "template": false, + "timeout_clone": 1800, + "timeout_create": 1800, + "timeout_migrate": 1800, + "timeout_move_disk": 1800, + "timeout_reboot": 1800, + "timeout_shutdown_vm": 1800, + "timeout_start_vm": 1800, + "timeout_stop_vm": 300, + "tpm_state": [], + "usb": [], + "vga": [], + "virtiofs": [], + "vm_id": 104, + "watchdog": [] + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.control_plane1_ignition", + 
"proxmox_virtual_environment_download_file.flatcar_image", + "proxmox_virtual_environment_file.control_plane1_ignition", + "proxmox_virtual_environment_vm.flatcar_template" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_vm", + "name": "control_plane2", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "acpi": true, + "agent": [], + "amd_sev": [], + "audio_device": [], + "bios": "seabios", + "boot_order": null, + "cdrom": [], + "clone": [ + { + "datastore_id": "", + "full": true, + "node_name": "hyper1", + "retries": 1, + "vm_id": 103 + } + ], + "cpu": [ + { + "affinity": "", + "architecture": "", + "cores": 2, + "flags": null, + "hotplugged": 0, + "limit": 0, + "numa": false, + "sockets": 1, + "type": "host", + "units": 1024 + } + ], + "description": "kubernetes control-plane2", + "disk": [], + "efi_disk": [], + "hook_script_file_id": null, + "hostpci": [], + "id": "107", + "initialization": [ + { + "datastore_id": "local-lvm", + "dns": [], + "interface": "ide2", + "ip_config": [], + "meta_data_file_id": "", + "network_data_file_id": "", + "type": "", + "user_account": [], + "user_data_file_id": "cephfs:snippets/control-plane2-ignition-user-data", + "vendor_data_file_id": "" + } + ], + "ipv4_addresses": [], + "ipv6_addresses": [], + "keyboard_layout": "en-us", + "kvm_arguments": null, + "mac_addresses": [ + "BC:24:11:17:EA:7E" + ], + "machine": null, + "memory": [ + { + "dedicated": 3072, + "floating": 3072, + "hugepages": "", + "keep_hugepages": false, + "shared": 0 + } + ], + "migrate": false, + "name": "control-plane2", + "network_device": [ + { + "bridge": "vmbr0", + "disconnected": false, + "enabled": true, + "firewall": false, + "mac_address": "BC:24:11:17:EA:7E", + "model": "virtio", + "mtu": 0, + "queues": 0, + "rate_limit": 0, + "trunks": "", + "vlan_id": 0 + } + ], + "network_interface_names": [], + "node_name": "hyper2", + "numa": [], + "on_boot": true, + "operating_system": [], + "pool_id": null, + "protection": false, + "reboot": false, + "reboot_after_update": true, + "rng": [], + "scsi_hardware": "virtio-scsi-pci", + "serial_device": [], + "smbios": [], + "started": true, + "startup": [], + "stop_on_destroy": false, + "tablet_device": true, + "tags": [ + "control-plane", + "flatcar", + "kubernetes", + "terraform" + ], + "template": false, + "timeout_clone": 1800, + "timeout_create": 1800, + "timeout_migrate": 1800, + "timeout_move_disk": 1800, + "timeout_reboot": 1800, + "timeout_shutdown_vm": 1800, + "timeout_start_vm": 1800, + "timeout_stop_vm": 300, + "tpm_state": [], + "usb": [], + "vga": [], + "virtiofs": [], + "vm_id": 107, + "watchdog": [] + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.control_plane1_ignition", + "data.ct_config.control_plane2_ignition", + "null_resource.wait_for_cp1", + "proxmox_virtual_environment_download_file.flatcar_image", + "proxmox_virtual_environment_file.control_plane1_ignition", + "proxmox_virtual_environment_file.control_plane2_ignition", + "proxmox_virtual_environment_vm.control_plane1", + "proxmox_virtual_environment_vm.flatcar_template" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_vm", + "name": "control_plane3", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "acpi": true, + "agent": [], + "amd_sev": [], + "audio_device": [], + 
"bios": "seabios", + "boot_order": null, + "cdrom": [], + "clone": [ + { + "datastore_id": "", + "full": true, + "node_name": "hyper1", + "retries": 1, + "vm_id": 103 + } + ], + "cpu": [ + { + "affinity": "", + "architecture": "", + "cores": 2, + "flags": null, + "hotplugged": 0, + "limit": 0, + "numa": false, + "sockets": 1, + "type": "host", + "units": 1024 + } + ], + "description": "kubernetes control-plane3", + "disk": [], + "efi_disk": [], + "hook_script_file_id": null, + "hostpci": [], + "id": "108", + "initialization": [ + { + "datastore_id": "local-lvm", + "dns": [], + "interface": "ide2", + "ip_config": [], + "meta_data_file_id": "", + "network_data_file_id": "", + "type": "", + "user_account": [], + "user_data_file_id": "cephfs:snippets/control-plane3-ignition-user-data", + "vendor_data_file_id": "" + } + ], + "ipv4_addresses": [], + "ipv6_addresses": [], + "keyboard_layout": "en-us", + "kvm_arguments": null, + "mac_addresses": [ + "BC:24:11:C2:94:6A" + ], + "machine": null, + "memory": [ + { + "dedicated": 3072, + "floating": 3072, + "hugepages": "", + "keep_hugepages": false, + "shared": 0 + } + ], + "migrate": false, + "name": "control-plane3", + "network_device": [ + { + "bridge": "vmbr0", + "disconnected": false, + "enabled": true, + "firewall": false, + "mac_address": "BC:24:11:C2:94:6A", + "model": "virtio", + "mtu": 0, + "queues": 0, + "rate_limit": 0, + "trunks": "", + "vlan_id": 0 + } + ], + "network_interface_names": [], + "node_name": "hyper3", + "numa": [], + "on_boot": true, + "operating_system": [], + "pool_id": null, + "protection": false, + "reboot": false, + "reboot_after_update": true, + "rng": [], + "scsi_hardware": "virtio-scsi-pci", + "serial_device": [], + "smbios": [], + "started": true, + "startup": [], + "stop_on_destroy": false, + "tablet_device": true, + "tags": [ + "control-plane", + "flatcar", + "kubernetes", + "terraform" + ], + "template": false, + "timeout_clone": 1800, + "timeout_create": 1800, + "timeout_migrate": 1800, + "timeout_move_disk": 1800, + "timeout_reboot": 1800, + "timeout_shutdown_vm": 1800, + "timeout_start_vm": 1800, + "timeout_stop_vm": 300, + "tpm_state": [], + "usb": [], + "vga": [], + "virtiofs": [], + "vm_id": 108, + "watchdog": [] + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.control_plane1_ignition", + "data.ct_config.control_plane3_ignition", + "null_resource.wait_for_cp1", + "proxmox_virtual_environment_download_file.flatcar_image", + "proxmox_virtual_environment_file.control_plane1_ignition", + "proxmox_virtual_environment_file.control_plane3_ignition", + "proxmox_virtual_environment_vm.control_plane1", + "proxmox_virtual_environment_vm.flatcar_template" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_vm", + "name": "flatcar_template", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "acpi": true, + "agent": [], + "amd_sev": [], + "audio_device": [], + "bios": "seabios", + "boot_order": [ + "scsi0", + "ide2" + ], + "cdrom": [], + "clone": [], + "cpu": [ + { + "affinity": "", + "architecture": "", + "cores": 1, + "flags": null, + "hotplugged": 0, + "limit": 0, + "numa": false, + "sockets": 1, + "type": "host", + "units": 1024 + } + ], + "description": "managed by terraform - base template for flatcar", + "disk": [ + { + "aio": "io_uring", + "backup": true, + "cache": "none", + "datastore_id": "Pool1", + "discard": "on", + 
"file_format": "raw", + "file_id": "", + "import_from": "cephfs:import/flatcar_production_proxmoxve_image.qcow2", + "interface": "virtio0", + "iothread": false, + "path_in_datastore": "vm-103-disk-0", + "replicate": true, + "serial": "", + "size": 50, + "speed": [], + "ssd": false + } + ], + "efi_disk": [], + "hook_script_file_id": null, + "hostpci": [], + "id": "103", + "initialization": [ + { + "datastore_id": "Pool1", + "dns": [], + "interface": "ide2", + "ip_config": [], + "meta_data_file_id": "", + "network_data_file_id": "", + "type": "", + "user_account": [], + "user_data_file_id": "", + "vendor_data_file_id": "" + } + ], + "ipv4_addresses": [], + "ipv6_addresses": [], + "keyboard_layout": "en-us", + "kvm_arguments": "", + "mac_addresses": [ + "BC:24:11:58:99:FF" + ], + "machine": "", + "memory": [ + { + "dedicated": 2048, + "floating": 2048, + "hugepages": "", + "keep_hugepages": false, + "shared": 0 + } + ], + "migrate": false, + "name": "flatcar-template", + "network_device": [ + { + "bridge": "vmbr0", + "disconnected": false, + "enabled": true, + "firewall": false, + "mac_address": "BC:24:11:58:99:FF", + "model": "virtio", + "mtu": 0, + "queues": 0, + "rate_limit": 0, + "trunks": "", + "vlan_id": 0 + } + ], + "network_interface_names": [], + "node_name": "hyper1", + "numa": [], + "on_boot": true, + "operating_system": [], + "pool_id": null, + "protection": false, + "reboot": false, + "reboot_after_update": true, + "rng": [], + "scsi_hardware": "virtio-scsi-pci", + "serial_device": [], + "smbios": [], + "started": null, + "startup": [], + "stop_on_destroy": true, + "tablet_device": true, + "tags": [ + "flatcar", + "kubernetes", + "terraform" + ], + "template": true, + "timeout_clone": 1800, + "timeout_create": 1800, + "timeout_migrate": 1800, + "timeout_move_disk": 1800, + "timeout_reboot": 1800, + "timeout_shutdown_vm": 1800, + "timeout_start_vm": 1800, + "timeout_stop_vm": 300, + "tpm_state": [], + "usb": [], + "vga": [], + "virtiofs": [], + "vm_id": 103, + "watchdog": [] + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "proxmox_virtual_environment_download_file.flatcar_image" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_vm", + "name": "worker1", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "acpi": true, + "agent": [], + "amd_sev": [], + "audio_device": [], + "bios": "seabios", + "boot_order": null, + "cdrom": [], + "clone": [ + { + "datastore_id": "", + "full": true, + "node_name": "hyper1", + "retries": 1, + "vm_id": 103 + } + ], + "cpu": [ + { + "affinity": "", + "architecture": "", + "cores": 2, + "flags": null, + "hotplugged": 0, + "limit": 0, + "numa": false, + "sockets": 1, + "type": "host", + "units": 1024 + } + ], + "description": "kubernetes worker1", + "disk": [], + "efi_disk": [], + "hook_script_file_id": null, + "hostpci": [], + "id": "109", + "initialization": [ + { + "datastore_id": "local-lvm", + "dns": [], + "interface": "ide2", + "ip_config": [], + "meta_data_file_id": "", + "network_data_file_id": "", + "type": "", + "user_account": [], + "user_data_file_id": "cephfs:snippets/worker1-ignition-user-data", + "vendor_data_file_id": "" + } + ], + "ipv4_addresses": [], + "ipv6_addresses": [], + "keyboard_layout": "en-us", + "kvm_arguments": null, + "mac_addresses": [ + "BC:24:11:06:63:61" + ], + "machine": null, + "memory": [ + { + "dedicated": 8192, + "floating": 8192, + 
"hugepages": "", + "keep_hugepages": false, + "shared": 0 + } + ], + "migrate": false, + "name": "worker1", + "network_device": [ + { + "bridge": "vmbr0", + "disconnected": false, + "enabled": true, + "firewall": false, + "mac_address": "BC:24:11:06:63:61", + "model": "virtio", + "mtu": 0, + "queues": 0, + "rate_limit": 0, + "trunks": "", + "vlan_id": 0 + } + ], + "network_interface_names": [], + "node_name": "hyper1", + "numa": [], + "on_boot": true, + "operating_system": [], + "pool_id": null, + "protection": false, + "reboot": false, + "reboot_after_update": true, + "rng": [], + "scsi_hardware": "virtio-scsi-pci", + "serial_device": [], + "smbios": [], + "started": true, + "startup": [], + "stop_on_destroy": false, + "tablet_device": true, + "tags": [ + "flatcar", + "kubernetes", + "terraform", + "worker" + ], + "template": false, + "timeout_clone": 1800, + "timeout_create": 1800, + "timeout_migrate": 1800, + "timeout_move_disk": 1800, + "timeout_reboot": 1800, + "timeout_shutdown_vm": 1800, + "timeout_start_vm": 1800, + "timeout_stop_vm": 300, + "tpm_state": [], + "usb": [], + "vga": [], + "virtiofs": [], + "vm_id": 109, + "watchdog": [] + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.control_plane1_ignition", + "data.ct_config.control_plane3_ignition", + "data.ct_config.worker1_ignition", + "null_resource.wait_for_cp1", + "null_resource.wait_for_cp3", + "proxmox_virtual_environment_download_file.flatcar_image", + "proxmox_virtual_environment_file.control_plane1_ignition", + "proxmox_virtual_environment_file.control_plane3_ignition", + "proxmox_virtual_environment_file.worker1_ignition", + "proxmox_virtual_environment_vm.control_plane1", + "proxmox_virtual_environment_vm.control_plane3", + "proxmox_virtual_environment_vm.flatcar_template" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_vm", + "name": "worker2", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "acpi": true, + "agent": [], + "amd_sev": [], + "audio_device": [], + "bios": "seabios", + "boot_order": null, + "cdrom": [], + "clone": [ + { + "datastore_id": "", + "full": true, + "node_name": "hyper1", + "retries": 1, + "vm_id": 103 + } + ], + "cpu": [ + { + "affinity": "", + "architecture": "", + "cores": 2, + "flags": null, + "hotplugged": 0, + "limit": 0, + "numa": false, + "sockets": 1, + "type": "host", + "units": 1024 + } + ], + "description": "kubernetes worker2", + "disk": [], + "efi_disk": [], + "hook_script_file_id": null, + "hostpci": [], + "id": "112", + "initialization": [ + { + "datastore_id": "local-lvm", + "dns": [], + "interface": "ide2", + "ip_config": [], + "meta_data_file_id": "", + "network_data_file_id": "", + "type": "", + "user_account": [], + "user_data_file_id": "cephfs:snippets/worker2-ignition-user-data", + "vendor_data_file_id": "" + } + ], + "ipv4_addresses": [], + "ipv6_addresses": [], + "keyboard_layout": "en-us", + "kvm_arguments": null, + "mac_addresses": [ + "BC:24:11:0E:DC:6F" + ], + "machine": null, + "memory": [ + { + "dedicated": 8192, + "floating": 8192, + "hugepages": "", + "keep_hugepages": false, + "shared": 0 + } + ], + "migrate": false, + "name": "worker2", + "network_device": [ + { + "bridge": "vmbr0", + "disconnected": false, + "enabled": true, + "firewall": false, + "mac_address": "BC:24:11:0E:DC:6F", + "model": "virtio", + "mtu": 0, + "queues": 0, + "rate_limit": 0, + "trunks": "", + 
"vlan_id": 0 + } + ], + "network_interface_names": [], + "node_name": "hyper2", + "numa": [], + "on_boot": true, + "operating_system": [], + "pool_id": null, + "protection": false, + "reboot": false, + "reboot_after_update": true, + "rng": [], + "scsi_hardware": "virtio-scsi-pci", + "serial_device": [], + "smbios": [], + "started": true, + "startup": [], + "stop_on_destroy": false, + "tablet_device": true, + "tags": [ + "flatcar", + "kubernetes", + "terraform", + "worker" + ], + "template": false, + "timeout_clone": 1800, + "timeout_create": 1800, + "timeout_migrate": 1800, + "timeout_move_disk": 1800, + "timeout_reboot": 1800, + "timeout_shutdown_vm": 1800, + "timeout_start_vm": 1800, + "timeout_stop_vm": 300, + "tpm_state": [], + "usb": [], + "vga": [], + "virtiofs": [], + "vm_id": 112, + "watchdog": [] + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.control_plane1_ignition", + "data.ct_config.control_plane3_ignition", + "data.ct_config.worker2_ignition", + "null_resource.wait_for_cp1", + "null_resource.wait_for_cp3", + "proxmox_virtual_environment_download_file.flatcar_image", + "proxmox_virtual_environment_file.control_plane1_ignition", + "proxmox_virtual_environment_file.control_plane3_ignition", + "proxmox_virtual_environment_file.worker2_ignition", + "proxmox_virtual_environment_vm.control_plane1", + "proxmox_virtual_environment_vm.control_plane3", + "proxmox_virtual_environment_vm.flatcar_template" + ] + } + ] + }, + { + "mode": "managed", + "type": "proxmox_virtual_environment_vm", + "name": "worker3", + "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "acpi": true, + "agent": [], + "amd_sev": [], + "audio_device": [], + "bios": "seabios", + "boot_order": null, + "cdrom": [], + "clone": [ + { + "datastore_id": "", + "full": true, + "node_name": "hyper1", + "retries": 1, + "vm_id": 103 + } + ], + "cpu": [ + { + "affinity": "", + "architecture": "", + "cores": 2, + "flags": null, + "hotplugged": 0, + "limit": 0, + "numa": false, + "sockets": 1, + "type": "host", + "units": 1024 + } + ], + "description": "kubernetes worker3", + "disk": [], + "efi_disk": [], + "hook_script_file_id": null, + "hostpci": [], + "id": "113", + "initialization": [ + { + "datastore_id": "local-lvm", + "dns": [], + "interface": "ide2", + "ip_config": [], + "meta_data_file_id": "", + "network_data_file_id": "", + "type": "", + "user_account": [], + "user_data_file_id": "cephfs:snippets/worker3-ignition-user-data", + "vendor_data_file_id": "" + } + ], + "ipv4_addresses": [], + "ipv6_addresses": [], + "keyboard_layout": "en-us", + "kvm_arguments": null, + "mac_addresses": [ + "BC:24:11:0F:F8:42" + ], + "machine": null, + "memory": [ + { + "dedicated": 8192, + "floating": 8192, + "hugepages": "", + "keep_hugepages": false, + "shared": 0 + } + ], + "migrate": false, + "name": "worker3", + "network_device": [ + { + "bridge": "vmbr0", + "disconnected": false, + "enabled": true, + "firewall": false, + "mac_address": "BC:24:11:0F:F8:42", + "model": "virtio", + "mtu": 0, + "queues": 0, + "rate_limit": 0, + "trunks": "", + "vlan_id": 0 + } + ], + "network_interface_names": [], + "node_name": "hyper3", + "numa": [], + "on_boot": true, + "operating_system": [], + "pool_id": null, + "protection": false, + "reboot": false, + "reboot_after_update": true, + "rng": [], + "scsi_hardware": "virtio-scsi-pci", + "serial_device": [], + "smbios": [], + "started": true, + 
"startup": [], + "stop_on_destroy": false, + "tablet_device": true, + "tags": [ + "flatcar", + "kubernetes", + "terraform", + "worker" + ], + "template": false, + "timeout_clone": 1800, + "timeout_create": 1800, + "timeout_migrate": 1800, + "timeout_move_disk": 1800, + "timeout_reboot": 1800, + "timeout_shutdown_vm": 1800, + "timeout_start_vm": 1800, + "timeout_stop_vm": 300, + "tpm_state": [], + "usb": [], + "vga": [], + "virtiofs": [], + "vm_id": 113, + "watchdog": [] + }, + "sensitive_attributes": [], + "identity_schema_version": 0, + "private": "bnVsbA==", + "dependencies": [ + "data.ct_config.control_plane1_ignition", + "data.ct_config.control_plane3_ignition", + "data.ct_config.worker3_ignition", + "null_resource.wait_for_cp1", + "null_resource.wait_for_cp3", + "proxmox_virtual_environment_download_file.flatcar_image", + "proxmox_virtual_environment_file.control_plane1_ignition", + "proxmox_virtual_environment_file.control_plane3_ignition", + "proxmox_virtual_environment_file.worker3_ignition", + "proxmox_virtual_environment_vm.control_plane1", + "proxmox_virtual_environment_vm.control_plane3", + "proxmox_virtual_environment_vm.flatcar_template" + ] + } + ] + } + ], "check_results": null } diff --git a/terraform/terraform.tfstate.1754134474.backup b/terraform/terraform.tfstate.1754134474.backup deleted file mode 100644 index 95f9f7d..0000000 --- a/terraform/terraform.tfstate.1754134474.backup +++ /dev/null @@ -1,397 +0,0 @@ -{ - "version": 4, - "terraform_version": "1.12.2", - "serial": 1153, - "lineage": "751616a2-db32-0edf-7258-3ba00b4868bd", - "outputs": {}, - "resources": [ - { - "mode": "data", - "type": "ct_config", - "name": "control_plane1_ignition", - "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n control-plane1\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::91/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:3::1\n Address=10.0.2.91/24\n Gateway=10.0.2.3\n DNS=10.0.3.1\n Domains=undercloud.local\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured 
infrastructure.                                *\n            * All activities are monitored and logged.                       *\n            * Unauthorized access or misuse is strictly prohibited and       *\n            * may result in disciplinary and legal action.                   *\n            *******************************************************************\n\n            --------------------------------------------------------------------------------\n            kubernetes control plane Node\n\n            Manage via:\n                      kubectl (kubectl)\n                      calico (calicoctl)\n                      velero - backup (velero)\n                      argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n            --------------------------------------------------------------------------------\n\n      - path: /etc/sysctl.d/99-k8s.conf\n        mode: 0644\n        contents:\n          inline: |\n            net.ipv4.ip_forward = 1\n            net.ipv6.ip_forward = 1\n            net.ipv6.conf.all.forwarding = 1\n            net.ipv4.conf.all.forwarding = 1\n            net.bridge.bridge-nf-call-iptables = 1\n            net.bridge.bridge-nf-call-ip6tables = 1\n            net.netfilter.nf_conntrack_max = 1000000\n            net.ipv4.conf.all.rp_filter = 0\n            net.ipv6.conf.all.disable_ipv6 = 0\n            vm.overcommit_memory = 1\n            fs.inotify.max_user_watches = 524288\n            fs.inotify.max_user_instances = 512\n            kernel.panic = 10\n            kernel.panic_on_oops = 1\n\n      - path: /etc/flatcar/update.conf\n        overwrite: true\n        mode: 0420\n        contents:\n          inline: |\n            REBOOT_STRATEGY=off\n\n      - path: /opt/bin/kubeadm\n        mode: 0755\n        contents:\n          source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n      - path: /opt/bin/kubelet\n        mode: 0755\n        contents:\n          source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n      - path: /opt/bin/kubectl\n        mode: 0755\n        contents:\n          source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n      - path: /opt/bin/calicoctl\n        mode: 0755\n        contents:\n          source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n      - path: /etc/kubernetes/kubeadm-init.yaml\n        mode: 0644\n        contents:\n          inline: |\n            apiVersion: kubeadm.k8s.io/v1beta3\n            kind: InitConfiguration\n            bootstrapTokens:\n            - token: \"kvg1hc.t3rewovrps426rof\"\n              description: \"default kubeadm bootstrap token\"\n              ttl: \"0\"\n            nodeRegistration:\n              name: control-plane1\n              criSocket: unix:///run/containerd/containerd.sock\n              kubeletExtraArgs:\n                node-ip: \"fd00:0:0:2::91\"\n                volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n            localAPIEndpoint:\n              advertiseAddress: \"fd00:0:0:2::91\"\n              bindPort: 6443\n            certificateKey: \"fee7c3e5cfcac7e4774c6efca0464a42d897f30f7300340d6578b5cfb4a3d34b\"\n            ---\n            apiVersion: kubeadm.k8s.io/v1beta3\n            kind: ClusterConfiguration\n            controlPlaneEndpoint: \"[fd00:0:0:2::100]:6443\"\n            networking:\n              podSubnet: \"fd00:10:244::/56,10.244.0.0/16\"\n              serviceSubnet: \"fd00:10:96::/112,10.96.0.0/12\"\n              dnsDomain: \"k8s.undercloud.local\"\n            controllerManager:\n              extraArgs:\n                flex-volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n                bind-address: '::'\n            ---\n            apiVersion: kubelet.config.k8s.io/v1beta1\n            kind: KubeletConfiguration\n            address: \"::\"\n            healthzBindAddress: \"::\"\n            clusterDomain: \"k8s.undercloud.local\"\n            volumePluginDir: /opt/libexec/kubernetes/kubelet-plugins/volume/exec\n            cgroupDriver: \"systemd\"\n\n      - path: /etc/kubernetes/calico.yaml\n        mode: 0644\n        contents:\n          source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calico.yaml\"\n\nsystemd:\n  units:\n    - name: modules-load.service\n      
enabled: true\n      contents: |\n        [Unit]\n        Description=Load necessary kernel modules\n        Before=containerd.service kubeadm-init.service\n\n        [Service]\n        Type=oneshot\n\n        ExecStart=/usr/bin/modprobe br_netfilter\n        ExecStart=/usr/bin/modprobe overlay\n        RemainAfterExit=yes\n\n        [Install]\n        WantedBy=multi-user.target\n\n    - name: systemd-networkd-wait-online.service\n      enabled: true\n\n    - name: containerd.service\n      enabled: true\n      contents: |\n        [Unit]\n        Description=containerd container runtime\n        After=network.target modules-load.service\n\n        [Service]\n        #StandardOutput=journal+console\n        #StandardError=journal+console\n\n        ExecStart=/usr/bin/containerd\n        Restart=always\n        RestartSec=5\n        Delegate=yes\n        KillMode=process\n        OOMScoreAdjust=-999\n\n        [Install]\n        WantedBy=multi-user.target\n\n    - name: set-timezone.service\n      enabled: true\n      contents: |\n        [Unit]\n        Description=Set Timezone\n        After=network-online.target\n        Wants=network-online.target\n        [Service]\n        StandardOutput=journal+console\n        StandardError=journal+console\n\n        ExecStart=/bin/sh -c 'echo \"setting timezone to Europe/Berlin\"'\n        StandardOutput=journal+console\n        StandardError=journal+console\n        Type=oneshot\n        Restart=on-failure\n        ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n        ExecStart=/usr/bin/timedatectl set-ntp true \n        [Install]\n        WantedBy=kubeadm.service\n\n    - name: kubelet.service\n      enabled: true\n      contents: |\n        [Unit]\n        Description=kubelet, the Kubernetes Node Agent\n        Documentation=https://kubernetes.io/docs/home\n        Wants=network-online.target\n        After=network-online.target\n        [Service]\n        StandardOutput=journal+console\n        StandardError=journal+console\n\n        ExecStart=/bin/sh -c 'echo \"kubelet.service start\"'\n        #EnvironmentFile=/run/metadata/coreos\n        Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n        Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n        # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n        EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n        ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n        ExecStart=/bin/sh -c 'echo \"kubelet started\"'\n        Restart=always\n        StartLimitInterval=0\n        RestartSec=10\n        [Install]\n        WantedBy=multi-user.target\n\n    - name: kubeadm-init.service\n      enabled: true\n      contents: |\n        [Unit]\n        Description=Kubeadm Init Cluster\n        After=network-online.target containerd.service kubelet.service\n        Wants=network-online.target\n\n        [Service]\n        Type=oneshot\n        StandardOutput=journal+console\n        StandardError=journal+console\n\n        ExecStart=/bin/sh -c 'echo \"kubeadm-init.service started...\"'\n\n        # Environment\n        Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n        Environment=DATASTORE_TYPE=kubernetes\n        Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n        \n        ExecStartPre=/bin/sleep 30s\n        ExecStart=/bin/sh -c 'echo \"running kubeadm init...\"'\n        ExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\n        \n        # copy files for kubectl\n        ExecStart=/bin/sh -c 'echo \"copying files (admin.conf) to core home folder.\"'\n        ExecStartPost=/usr/bin/mkdir -p /home/core/.kube\n        ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\n        ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\n        \n        ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n        
Restart=on-failure\n        RestartSec=120s\n        [Install]\n        WantedBy=multi-user.target\n    - name: install-calico.service\n      enabled: true\n      contents: |\n        [Unit]\n        Wants=kubeadm-init.target\n        After=kubeadm-init.target\n        [Service]\n        StandardOutput=journal+console\n        StandardError=journal+console\n\n        ExecStart=/bin/sh -c 'echo \"install-calico.service started...\"'\n        Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n        Environment=DATASTORE_TYPE=kubernetes\n        Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin\n        Type=oneshot\n        StandardOutput=journal+console\n        StandardError=journal+console\n        #ExecStartPre=/bin/sleep 120s\n        ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\n        ExecStart=/bin/sh -c 'echo \"install tigera operator...\"'\n        ExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\n        ExecStart=/bin/sh -c 'echo \"waiting 60s...\"'\n        ExecStart=/bin/sleep 60s\n        ExecStart=/bin/sh -c 'echo \"waiting for tigera operator... (20min max)\"'\n        ExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\n        ExecStart=/bin/sh -c 'echo \"apply calico (calico-apiserver)...\"'\n        ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\n        ExecStart=/bin/sh -c 'echo \"waiting 10m...\"'\n        ExecStart=/bin/sleep 10m\n        ExecStart=/bin/sh -c 'echo \"waiting calico-apiserver... (20min max)\"'\n        ExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\n        ExecStart=/bin/sh -c 'echo \"waiting 120s...\"'\n        ExecStart=/bin/sleep 2m\n        ExecStart=/bin/sh -c 'echo \"apply calico-peers...\"'\n        ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\n        ExecStart=/bin/sh -c 'echo \"waiting 60s...\"'\n        ExecStart=/bin/sleep 1m\n        ExecStart=/bin/sh -c 'echo \"apply calico-ippools...\"'\n        ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\n        ExecStart=/usr/bin/systemctl disable install-calico.service\n        #RemainAfterExit=true\n        Restart=on-failure\n        RestartSec=120s\n        [Install]\n        WantedBy=multi-user.target",
-            "id": "1903625485",
-            "pretty_print": true,
-            "rendered": "{\n  \"ignition\": {\n    \"config\": {\n      \"replace\": {\n        \"verification\": {}\n      }\n    },\n    \"proxy\": {},\n    \"security\": {\n      \"tls\": {}\n    },\n    \"timeouts\": {},\n    \"version\": \"3.4.0\"\n  },\n  \"kernelArguments\": {},\n  \"passwd\": {\n    \"users\": [\n      {\n        \"name\": \"core\",\n        \"sshAuthorizedKeys\": [\n          \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n        ]\n      }\n    ]\n  },\n  \"storage\": {\n    \"directories\": [\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/bin\",\n        \"user\": {},\n        \"mode\": 493\n      },\n      {\n        \"group\": {},\n        \"overwrite\": true,\n        \"path\": \"/opt/cni/bin\",\n        \"user\": {},\n        \"mode\": 755\n      }\n    ],\n    \"files\": [\n      {\n        \"group\": {},\n        \"path\": \"/etc/hostname\",\n        \"user\": {},\n        \"contents\": {\n          \"compression\": \"\",\n          \"source\": \"data:,control-plane1%0A\",\n          \"verification\": {}\n        },\n        \"mode\": 420\n      },\n      {\n        \"group\": 
{},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrS0N9MxMu98SS1PLEShQZYy4Xv2CEiLGVlSHcBEMDPQM9Iz1LQ30jhGaoIEQfmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//szh4MqsAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-init.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6ySQW/bOBCF7/wVBC+5rETKomWbN2+SQxAsYGwWeyl6oMiRQogmBZJynP76gpITxGiAFkWhCwHNezNvvpGj+R9CNN4JPEwtSH0sh20sjaenqoUkazQYpwV+cCbdeteZfgoyGe9Q632KKcjxPz+AiwJhXOCU3wKT4dRXz6pMdYAXfwpj5Ksm+I4gjDHWEFUwY5q7Eg2dnGx6a4/ffRezRZKSFZgwgpzX8C/0JlfMBghjJ48gsPIuBW+L0UoHFcJYBfPk1QBJ4MmZs6CUhsnRXCeNg6A/PMvo1YDwPISFdH9OQe5DP6fCODctzCgw6TRjIn8rIXbVMtvJ2+kIxWin3rhCmyAwoX5M1JoWzqBoNg0OEkR68b8UR7po6VxGkPVK2v3h4d7p0RuXcnepTxCSibDXOkCMnw7RGqcPPiSBG85rpLKiM0omeITXrADYqBrWqlNSbYBvNlw10CnJeMMlX+ntbtPVrNvUjNWc6Wa92bZr1bVc1rrmLUFFUaBfPpZbO8UE4fpeLnwOGc97QEy+fExTMfZV5AgEOUgvPgzG9XkLo9dPU+syyyV+xcSKcyHouvmrYuWK85KVjFZNXkeEcDIKflDsGiFoVa2yYtcsglUWaBfv/FGa+XS3sZychqCsn3Q5MyFv01sI/0gnewh5Krg+k87Cufhz15AtM9hCvpG/EeLmUxIWUqnmbV8DqS5AHpeaayDvtkQIgp5B2vT87W/j9P7qh1pg/mQ/y+iHOchdDv0bmZHqg5/Gu2BOkNcWX2OCoyboewAAAP//gIWCkqYEAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/calico.yaml\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calico.yaml\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\n\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"setting timezone to Europe/Berlin\\\"'\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"kubelet.service start\\\"'\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS 
$KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nExecStart=/bin/sh -c 'echo \\\"kubelet started\\\"'\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"kubelet.service\"\n      },\n      {\n        \"contents\": \"[Unit]\\nDescription=Kubeadm Init Cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"kubeadm-init.service started...\\\"'\\n\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"running kubeadm init...\\\"'\\nExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\\n\\n# copy files for kubectl\\nExecStart=/bin/sh -c 'echo \\\"copying files (admin.conf) to core home folder.\\\"'\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\nExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n        \"enabled\": true,\n        \"name\": \"kubeadm-init.service\"\n      },\n      {\n        \"contents\": \"[Unit]\\nWants=kubeadm-init.target\\nAfter=kubeadm-init.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"install-calico.service started...\\\"'\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\n#ExecStartPre=/bin/sleep 120s\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\\nExecStart=/bin/sh -c 'echo \\\"install tigera operator...\\\"'\\nExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\\nExecStart=/bin/sh -c 'echo \\\"waiting 60s...\\\"'\\nExecStart=/bin/sleep 60s\\nExecStart=/bin/sh -c 'echo \\\"waiting for tigera operator... (20min max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"apply calico (calico-apiserver)...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\\nExecStart=/bin/sh -c 'echo \\\"waiting 10m...\\\"'\\nExecStart=/bin/sleep 10m\\nExecStart=/bin/sh -c 'echo \\\"waiting calico-apiserver... 
(20mini max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"witing 120s...\\\"'\\nExecStart=/bin/sleep 2m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-peers...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\\nExecStart=/bin/sh -c 'echo \\\"witing 60s...\\\"'\\nExecStart=/bin/sleep 1m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-ippools...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\\nExecStart=/usr/bin/systemctl disable install-calico.service\\n#RemainAfterExit=true\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\",\n \"enabled\": true,\n \"name\": \"install-calico.service\"\n }\n ]\n }\n}", - "snippets": null, - "strict": false - }, - "sensitive_attributes": [], - "identity_schema_version": 0 - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_download_file", - "name": "flatcar_image", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "checksum": null, - "checksum_algorithm": null, - "content_type": "import", - "datastore_id": "cephfs", - "decompression_algorithm": null, - "file_name": "flatcar_production_proxmoxve_image.qcow2", - "id": "cephfs:import/flatcar_production_proxmoxve_image.qcow2", - "node_name": "hyper1", - "overwrite": true, - "overwrite_unmanaged": false, - "size": 573243392, - "upload_timeout": 600, - "url": "http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/iso/flatcar_production_proxmoxve_image.img", - "verify": true - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "eyJvcmlnaW5hbF9zdGF0ZV9zaXplIjoiTlRjek1qUXpNemt5In0=" - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_file", - "name": "control_plane1_ignition", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content_type": "snippets", - "datastore_id": "cephfs", - "file_mode": null, - "file_modification_date": null, - "file_name": "control-plane1-ignition-user-data", - "file_size": null, - "file_tag": null, - "id": "cephfs:snippets/control-plane1-ignition-user-data", - "node_name": "hyper1", - "overwrite": true, - "source_file": [], - "source_raw": [ - { - "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 755\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane1%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": 
{},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrS0N9MxMu98SS1PLEShQZYy4Xv2CEiLGVlSHcBEMDPQM9Iz1LQ30jhGaoIEQfmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//szh4MqsAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-init.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6ySQW/bOBCF7/wVBC+5rETKomWbN2+SQxAsYGwWeyl6oMiRQogmBZJynP76gpITxGiAFkWhCwHNezNvvpGj+R9CNN4JPEwtSH0sh20sjaenqoUkazQYpwV+cCbdeteZfgoyGe9Q632KKcjxPz+AiwJhXOCU3wKT4dRXz6pMdYAXfwpj5Ksm+I4gjDHWEFUwY5q7Eg2dnGx6a4/ffRezRZKSFZgwgpzX8C/0JlfMBghjJ48gsPIuBW+L0UoHFcJYBfPk1QBJ4MmZs6CUhsnRXCeNg6A/PMvo1YDwPISFdH9OQe5DP6fCODctzCgw6TRjIn8rIXbVMtvJ2+kIxWin3rhCmyAwoX5M1JoWzqBoNg0OEkR68b8UR7po6VxGkPVK2v3h4d7p0RuXcnepTxCSibDXOkCMnw7RGqcPPiSBG85rpLKiM0omeITXrADYqBrWqlNSbYBvNlw10CnJeMMlX+ntbtPVrNvUjNWc6Wa92bZr1bVc1rrmLUFFUaBfPpZbO8UE4fpeLnwOGc97QEy+fExTMfZV5AgEOUgvPgzG9XkLo9dPU+syyyV+xcSKcyHouvmrYuWK85KVjFZNXkeEcDIKflDsGiFoVa2yYtcsglUWaBfv/FGa+XS3sZychqCsn3Q5MyFv01sI/0gnewh5Krg+k87Cufhz15AtM9hCvpG/EeLmUxIWUqnmbV8DqS5AHpeaayDvtkQIgp5B2vT87W/j9P7qh1pg/mQ/y+iHOchdDv0bmZHqg5/Gu2BOkNcWX2OCoyboewAAAP//gIWCkqYEAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/calico.yaml\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calico.yaml\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\n\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"setting timezone to Europe/Berlin\\\"'\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"kubelet.service start\\\"'\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS 
$KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nExecStart=/bin/sh -c 'echo \\\"kubelet started\\\"'\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Kubeadm Init Cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"kubeadm-init.service started...\\\"'\\n\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"running kubeadm init...\\\"'\\nExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\\n\\n# copy files for kubectl\\nExecStart=/bin/sh -c 'echo \\\"copying files (admin.conf) to core home folder.\\\"'\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\nExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-init.service\"\n },\n {\n \"contents\": \"[Unit]\\nWants=kubeadm-init.target\\nAfter=kubeadm-init.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"install.calico.service started...\\\"'\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\n#ExecStartPre=/bin/sleep 120s\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\\nExecStart=/bin/sh -c 'echo \\\"install tigera operator...\\\"'\\nExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\\nExecStart=/bin/sh -c 'echo \\\"witing 60s...\\\"'\\nExecStart=/bin/sleep 60s\\nExecStart=/bin/sh -c 'echo \\\"witing for tigera operator... (20mini max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"apply calico (calico-apiserver)...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\\nExecStart=/bin/sh -c 'echo \\\"witing 10m...\\\"'\\nExecStart=/bin/sleep 10m\\nExecStart=/bin/sh -c 'echo \\\"witing calico-apiserver... 
(20mini max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"witing 120s...\\\"'\\nExecStart=/bin/sleep 2m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-peers...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\\nExecStart=/bin/sh -c 'echo \\\"witing 60s...\\\"'\\nExecStart=/bin/sleep 1m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-ippools...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\\nExecStart=/usr/bin/systemctl disable install-calico.service\\n#RemainAfterExit=true\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\",\n \"enabled\": true,\n \"name\": \"install-calico.service\"\n }\n ]\n }\n}", - "file_name": "control-plane1-ignition-user-data", - "resize": 0 - } - ], - "timeout_upload": 1800 - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "control_plane1", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": null, - "cdrom": [], - "clone": [ - { - "datastore_id": "", - "full": true, - "node_name": "hyper1", - "retries": 1, - "vm_id": 103 - } - ], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 2, - "flags": [], - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "kubernetes control-plane1", - "disk": [], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "104", - "initialization": [ - { - "datastore_id": "local-lvm", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "cephfs:snippets/control-plane1-ignition-user-data", - "vendor_data_file_id": "" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": null, - "mac_addresses": [ - "BC:24:11:96:FC:F3" - ], - "machine": null, - "memory": [ - { - "dedicated": 2048, - "floating": 2048, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "control-plane1", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:96:FC:F3", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper1", - "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": true, - "startup": [], - "stop_on_destroy": false, - "tablet_device": true, - "tags": [ - "control-plane", - "flatcar", - "kubernetes", - "terraform" - ], - "template": false, - "timeout_clone": 1800, - "timeout_create": 1800, - "timeout_migrate": 1800, - 
"timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - "virtiofs": [], - "vm_id": 104, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "flatcar_template", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": [ - "scsi0", - "ide2" - ], - "cdrom": [], - "clone": [], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 1, - "flags": [], - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "managed by terraform - base template for flatcar", - "disk": [ - { - "aio": "io_uring", - "backup": true, - "cache": "none", - "datastore_id": "Pool1", - "discard": "on", - "file_format": "raw", - "file_id": "", - "import_from": "cephfs:import/flatcar_production_proxmoxve_image.qcow2", - "interface": "virtio0", - "iothread": false, - "path_in_datastore": "vm-103-disk-0", - "replicate": true, - "serial": "", - "size": 10, - "speed": [], - "ssd": false - } - ], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "103", - "initialization": [ - { - "datastore_id": "Pool1", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "", - "vendor_data_file_id": "" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": "", - "mac_addresses": [ - "BC:24:11:7E:19:32" - ], - "machine": "", - "memory": [ - { - "dedicated": 2048, - "floating": 2048, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "flatcar-template", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:7E:19:32", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper1", - "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": null, - "startup": [], - "stop_on_destroy": true, - "tablet_device": true, - "tags": [ - "flatcar", - "kubernetes", - "terraform" - ], - "template": true, - "timeout_clone": 1800, - "timeout_create": 1800, - "timeout_migrate": 1800, - "timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - "virtiofs": [], - "vm_id": 103, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "proxmox_virtual_environment_download_file.flatcar_image" - ] - 
} - ] - } - ], - "check_results": null -} diff --git a/terraform/terraform.tfstate.backup b/terraform/terraform.tfstate.backup index e625dca..b42b27d 100644 --- a/terraform/terraform.tfstate.backup +++ b/terraform/terraform.tfstate.backup @@ -1,1495 +1,9 @@ { "version": 4, "terraform_version": "1.12.2", - "serial": 1850, - "lineage": "751616a2-db32-0edf-7258-3ba00b4868bd", + "serial": 243, + "lineage": "d92c42be-29f9-bad9-ef9a-3dc952ff5fa5", "outputs": {}, - "resources": [ - { - "mode": "data", - "type": "ct_config", - "name": "control_plane1_ignition", - "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 755\n - path: /etc/kubernetes/manifests\n #overwrite: true\n mode: 0755\n - path: /etc/install-calico\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n control-plane1\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::91/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:3::1\n Address=10.0.2.91/24\n Gateway=10.0.2.3\n DNS=10.0.3.1\n Domains=undercloud.local\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes control plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n - path: \"/etc/install-calico/custom-resources.yaml\"\n overwrite: true\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml\"\n\n - path: /etc/kubernetes/kubeadm-init.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: InitConfiguration\n bootstrapTokens:\n - token: \"kvg1hc.t3rewovrps426rof\"\n description: \"default kubeadm bootstrap token\"\n ttl: \"0\"\n nodeRegistration:\n name: control-plane1\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::91\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n localAPIEndpoint:\n advertiseAddress: \"fd00:0:0:2::91\"\n bindPort: 6443\n certificateKey: \"fee7c3e5cfcac7e4774c6efca0464a42d897f30f7300340d6578b5cfb4a3d34b\"\n ---\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: ClusterConfiguration\n controlPlaneEndpoint: \"[fd00:0:0:2::100]:6443\"\n networking:\n podSubnet: \"fd00:0:0:a::/64,10.0.10.0/24\"\n serviceSubnet: \"fd00:0:0:f:1::/108,10.0.91.0/24\"\n dnsDomain: \"k8s.undercloud.local\"\n controllerManager:\n extraArgs:\n flex-volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n bind-address: '::'\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8s.undercloud.local\"\n volumePluginDir: /opt/libexec/kubernetes/kubelet-plugins/volume/exec\n cgroupDriver: \"systemd\"\n\n - path: /etc/kubernetes/calico.yaml\n mode: 0644\n contents:\n source: 
\"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calico.yaml\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n\n ExecStart=/bin/sh -c 'echo \"setting timezone to Europe/Berlin\"'\n StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=kubeadm.service\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-init.service\n enabled: true\n contents: |\n [Unit]\n Description=Kubeadm Init Cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n ConditionPathExists=!/etc/kubernetes/kubelet.conf\n\n [Service]\n Type=oneshot\n StandardOutput=journal+console\n StandardError=journal+console\n\n ExecStart=/bin/sh -c 'echo \"kubeadm-init.service started...\"'\n\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n ExecStart=/bin/sh -c 'echo \"running kubeadm init...\"'\n ExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\n \n # copy files for kubectl\n ExecStart=/bin/sh -c 'echo \"copying files (admin.conf) to core home folder.\"'\n ExecStartPost=/usr/bin/mkdir -p /home/core/.kube\n ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\n 
ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\n \n ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n [Install]\n WantedBy=multi-user.target\n - name: install-calico.service\n enabled: true\n contents: |\n [Unit]\n Wants=kubeadm-init.service\n After=kubeadm-init.service\n\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n\n ExecStart=/bin/sh -c 'echo \"install-calico.service started...\"'\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin\n Type=oneshot\n StandardOutput=journal+console\n StandardError=journal+console\n ExecStart=/bin/sh -c 'echo \"waiting 30s...\"'\n ExecStart=/bin/sleep 30s\n ExecStart=/bin/sh -c 'echo \"create calico namespace...\"'\n ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\n ExecStart=/bin/sh -c 'echo \"install tigera operator...\"'\n ExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml\n ExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\n ExecStart=/bin/sh -c 'echo \"waiting 60s...\"'\n ExecStart=/bin/sleep 60s\n ExecStart=/bin/sh -c 'echo \"waiting for tigera operator... (20min max)\"'\n ExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\n ExecStart=/bin/sh -c 'echo \"create calico custom resources...\"'\n ExecStart=/opt/bin/kubectl create -f /etc/install-calico/custom-resources.yaml\n \n ExecStart=/bin/sh -c 'echo \"waiting 3m..\"'\n ExecStart=/bin/sleep 3m\n #ExecStart=/bin/sh -c 'echo \"apply calico (calico-apiserver)...\"'\n #ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\n #ExecStart=/bin/sh -c 'echo \"waiting 1m...\"'\n #ExecStart=/bin/sleep 1m\n ExecStart=/bin/sh -c 'echo \"waiting calico-apiserver... 
(20min max)\"'\n ExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\n ExecStart=/bin/sh -c 'echo \"waiting 120s...\"'\n ExecStart=/bin/sleep 2m\n ExecStart=/bin/sh -c 'echo \"apply calico-peers...\"'\n ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\n ExecStart=/bin/sh -c 'echo \"waiting 60s...\"'\n ExecStart=/bin/sleep 1m\n ExecStart=/bin/sh -c 'echo \"apply calico-ippools...\"'\n ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\n \n #ExecStart=/bin/sh -c 'echo \"waiting for whisker..\"'\n #ExecStart=/opt/bin/kubectl wait deployment -n calico-system whisker --for condition=Available=True --timeout=1200s\n #ExecStart=/bin/sh -c 'echo \"port-forward -n calico-system service/whisker 8081:8081\"'\n #ExecStart=/opt/bin/kubectl port-forward -n calico-system service/whisker 8081:8081\n \n ExecStart=/usr/bin/systemctl disable install-calico.service\n #RemainAfterExit=true\n Restart=on-failure\n RestartSec=120s\n [Install]\n WantedBy=multi-user.target\n - name: install-argocd.service\n enabled: true\n contents: |\n [Unit]\n Wants=install-calico.service\n After=install-calico.service\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin\n Type=oneshot\n\n ExecStart=/opt/bin/kubectl wait deployment -n kube-system coredns --for condition=Available=True --timeout=600s\n \n ExecStart=/bin/sleep 1m\n ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml\n ExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/install.yaml\n ExecStart=/opt/bin/kubectl wait deployment -n argocd argocd-server --for condition=Available=True --timeout=600s\n \n #ExecStart=/bin/sleep 10s\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-bootstrap.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-apps.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/argocd.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/calico.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/rook-ceph.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/gitea.yaml\n #ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/argocd/argocd-secret.yaml\n ##ExecStart=/bin/sleep 10m\n #ExecStart=/opt/bin/kubectl wait deployment -n gitea gitea --for condition=Available=True --timeout=4800s\n #ExecStart=/bin/sleep 10m\n #ExecStart=/opt/bin/kubectl apply -n argocd -f 
http://gitea.gitea.svc.k8aux.undercloud.cf:3000/undercloud/k8aux-apps/raw/branch/main/app-of-apps/app-of-apps.yaml\n \n ExecStart=/usr/bin/systemctl disable install-argocd.service\n Restart=on-failure\n RestartSec=120s\n [Install]\n WantedBy=multi-user.target", - "id": "1696794206", - "pretty_print": true, - "rendered": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 755\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/manifests\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/install-calico\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane1%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrS0N9MxMu98SS1PLEShQZYy4Xv2CEiLGVlSHcBEMDPQM9Iz1LQ30jhGaoIEQfmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//szh4MqsAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n 
\"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/install-calico/custom-resources.yaml\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-init.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6xSQWvbMBi961cIXXqZLTtW41S3rO2hlEFYxy5jB1n67AorkpFkN92vH7KTrGGFjTECQeD33vfe9z4x6K/gg3aW435sQKh93m9Crh2dygaiqFCvreL4wep462yru9GLqJ1FjXMxRC+GL64HGzjCOMMxvTkm/dSVzzKPlYcXN/khsNXau5YgjDFWEKTXQ5ynEgWtGE08jcdn3UVsocRoOCYFQdYp+AydTohZAGFsxR44ls5G70w2GGGhRBhLr5+c7CFyPFp94JRSP1qacEJb8OrNMw9O9gjPJgzE+0P0Yuu7ORXGaWimB45Jq4qCp9+K85ty8TY5M+4hG8zYaZsp7Tkm1A2RGt3AASRNot5ChECP+kdwoAuXzjCCjJPCbHcP91YNTtuYpgs1gY86wFYpDyG8a6LRVu2cjxyvGauQTIxWSxHhEV4TA6CWFVzLVgpZA6trJtfQSlGwNRNspTY3dVsVbV0VRcUKtb6uN821bBsmKlWxhqAsy9BfH8utGUMEf3kvx352qZ5zQEy+vU1TFsV3niIQZCG+ON9r26UtDE49jY1NXf6KLzina/ahLPIiT390xdIyAvhJS/gd3/KSc1oWm4VyU54pyoY7txd6Pt1NyEerwEvjRpXPnZCTewP+k7CiA59cweWZtAYO2f+7hiSZis3Eqfkrzq/ebcJAzOW87ctCymMhjwvmspCzLOGcoGcQJj7/+Kit2l58kEuZf9jPYn03B7lLof8hM5Kdd+Nw5/UEaW3hNUTYK4J+BgAA///wuZ+zpgQAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/calico.yaml\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calico.yaml\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\n\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target 
modules-load.service\\n\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"setting timezone to Europe/Berlin\\\"'\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernetes.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Kubeadm Init Cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\nConditionPathExists=!/etc/kubernetes/kubelet.conf\\n\\n[Service]\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"kubeadm-init.service started...\\\"'\\n\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"running kubeadm init...\\\"'\\nExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\\n\\n# copy files for kubectl\\nExecStart=/bin/sh -c 'echo \\\"copying files (admin.conf) to core home folder.\\\"'\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\nExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-init.service\"\n },\n {\n \"contents\": \"[Unit]\\nWants=kubeadm-init.service\\nAfter=kubeadm-init.service\\n\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"install-calico.service 
started...\\\"'\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\nExecStart=/bin/sh -c 'echo \\\"waiting 30s...\\\"'\\nExecStart=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"create calico namespace...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\\nExecStart=/bin/sh -c 'echo \\\"install tigera operator...\\\"'\\nExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml\\nExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\\nExecStart=/bin/sh -c 'echo \\\"waiting 60s...\\\"'\\nExecStart=/bin/sleep 60s\\nExecStart=/bin/sh -c 'echo \\\"waiting for tigera operator... (20min max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"create calico custom resources...\\\"'\\nExecStart=/opt/bin/kubectl create -f /etc/install-calico/custom-resources.yaml\\n\\nExecStart=/bin/sh -c 'echo \\\"waiting 3m..\\\"'\\nExecStart=/bin/sleep 3m\\n#ExecStart=/bin/sh -c 'echo \\\"apply calico (calico-apiserver)...\\\"'\\n#ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\\n#ExecStart=/bin/sh -c 'echo \\\"waiting 1m...\\\"'\\n#ExecStart=/bin/sleep 1m\\nExecStart=/bin/sh -c 'echo \\\"waiting calico-apiserver... 
(20min max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"waiting 120s...\\\"'\\nExecStart=/bin/sleep 2m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-peers...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\\nExecStart=/bin/sh -c 'echo \\\"waiting 60s...\\\"'\\nExecStart=/bin/sleep 1m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-ippools...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\\n\\n#ExecStart=/bin/sh -c 'echo \\\"waiting for whisker..\\\"'\\n#ExecStart=/opt/bin/kubectl wait deployment -n calico-system whisker --for condition=Available=True --timeout=1200s\\n#ExecStart=/bin/sh -c 'echo \\\"port-forward -n calico-system service/whisker 8081:8081\\\"'\\n#ExecStart=/opt/bin/kubectl port-forward -n calico-system service/whisker 8081:8081\\n\\nExecStart=/usr/bin/systemctl disable install-calico.service\\n#RemainAfterExit=true\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"install-calico.service\"\n },\n {\n \"contents\": \"[Unit]\\nWants=install-calico.service\\nAfter=install-calico.service\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\n\\nExecStart=/opt/bin/kubectl wait deployment -n kube-system coredns --for condition=Available=True --timeout=600s\\n\\nExecStart=/bin/sleep 1m\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/install.yaml\\nExecStart=/opt/bin/kubectl wait deployment -n argocd argocd-server --for condition=Available=True --timeout=600s\\n\\n#ExecStart=/bin/sleep 10s\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-bootstrap.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-apps.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/argocd.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/calico.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/rook-ceph.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/gitea.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/argocd/argocd-secret.yaml\\n##ExecStart=/bin/sleep 10m\\n#ExecStart=/opt/bin/kubectl wait deployment -n gitea gitea --for condition=Available=True --timeout=4800s\\n#ExecStart=/bin/sleep 10m\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f 
http://gitea.gitea.svc.k8aux.undercloud.cf:3000/undercloud/k8aux-apps/raw/branch/main/app-of-apps/app-of-apps.yaml\\n\\nExecStart=/usr/bin/systemctl disable install-argocd.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\",\n \"enabled\": true,\n \"name\": \"install-argocd.service\"\n }\n ]\n }\n}", - "snippets": null, - "strict": false - }, - "sensitive_attributes": [], - "identity_schema_version": 0 - } - ] - }, - { - "mode": "data", - "type": "ct_config", - "name": "control_plane2_ignition", - "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n control-plane2\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::92/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:3::1\n Address=10.0.2.92/24\n Gateway=10.0.2.3\n DNS=10.0.3.1\n Domains=undercloud.local\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes control plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n controlPlane:\n localAPIEndpoint:\n advertiseAddress: \"fd00:0:0:2::92\"\n bindPort: 6443\n certificateKey: \"fee7c3e5cfcac7e4774c6efca0464a42d897f30f7300340d6578b5cfb4a3d34b\"\n nodeRegistration:\n name: control-plane2\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::92\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8aux.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::a\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n 
WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=kubeadm.service\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n\n # copy files for kubectl\n ExecStartPost=/usr/bin/mkdir -p /home/core/.kube\n ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\n ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n", - "id": "2829651611", - "pretty_print": true, - "rendered": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane2%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrSyN9MxMu98SS1PLEShQZYy4Xv2CEiLGVlSHcBEMDPQM9Iz1LI30jhGaoIEQfmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//eq4bWKsAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6SST2+cPBDG73wKizvgBbJsfNs3yeFtpGrVrXKpejD2sBnhtdHYppt++sqwSrOVeqq4IHjmzzPPT074AuTRWcHG2IPU53Lc+RJdNW96CLLJRrRasE8O7YOzA54iyYDOZsrZQM4cjLQgMsaMU9LsD/8/WT05tCF9Y0zqGSigh73WBN4Llg+ac5GeWoj7Ol9kPVp9cBQE27ZtkzGmUtWASgZ4hrdUBdCpBu7UoKTqoO26Vm1hUJK321a2td7dd0PDh67hvGm53t51u/5ODX0rG920fZ5Zp+ELnNCH1UFa0MozCHa1UkzJS52mEx6dGiEIFi1eRFVVFG2VdBItkP7wWnqnxowt5zMQni6B5J5OfvWfhhY4/cX27Ew8QzGZeEJbaCTB8spNoTLYwwVUlZqShQC+uva/in211laLLM80euVmoLc0tncuJJfTVzeCvQYx4RFoBnrPh+XfPu604fy7SNdfVwtLKcvH+bR5VWVoCH64mSbf1ltywyqK1ssBjiNOD/sXoDWwBaZAEbKiKLI/CTMQSrWAdAva5gra86q5ZU2+wyNEnr2CNOH1539o9f7mhzLRB6BHd5a47L6T8VJGq4GUcVGXC6O/dZ+P6TYFy2vON6LtuOjqgYtBbISQeaZO5OL0SDhDCsa/+QBnnbNsPf1hCeLxH0L7FQAA///TJ/QufgMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 
30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n# copy files for kubectl\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "snippets": null, - "strict": false - }, - "sensitive_attributes": [], - "identity_schema_version": 0 - } - ] - }, - { - "mode": "data", - "type": "ct_config", - "name": "control_plane3_ignition", - "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n control-plane3\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::93/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:3::1\n Address=10.0.2.93/24\n Gateway=10.0.2.3\n DNS=10.0.3.1\n Domains=undercloud.local\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes controle plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n controlPlane:\n localAPIEndpoint:\n advertiseAddress: \"fd00:0:0:2::93\"\n bindPort: 6443\n certificateKey: \"fee7c3e5cfcac7e4774c6efca0464a42d897f30f7300340d6578b5cfb4a3d34b\"\n nodeRegistration:\n name: control-plane3\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::93\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8aux.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::a\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n 
WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=kubeadm.service\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n\n # copy files for kubectl\n ExecStartPost=/usr/bin/mkdir -p /home/core/.kube\n ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\n ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n", - "id": "1645840555", - "pretty_print": true, - "rendered": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane3%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrS2N9MxMu98SS1PLEShQZYy4Xv2CEiLGVlSHcBEMDPQM9Iz1LY30jhGaoIEQfmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//Pdw6fqsAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6SST2+cPBDG73wKizvgXciy8W3fJIe3kapVt8ql6sHYAxnhtdHYppt++sqwSrOVeqq4IHjmzzPPT074AuTRWcHG2IHU53Lc+xJdNW86CLLORrRasE8O7YOzPQ6RZEBnM+VsIGeORloQGWPGKWkOx/+frJ4c2pC+MSb1DBTQw0FrAu8Fy3vNuUjPVoj7Ol9kHVp9dBQE2zVNnTGmUlWPSgZ4hrdUBdCqGu5Ur6RqoWnbRu2gV5I3u0Y2W72/b/ua923Ned1wvbtr992d6rtG1rpuujyzTsMXGNCH1UFa0MozCHa1UkzJyzKd8OTUCEGwaPEiqqqiaKukk2iB9IfX0js1Zmw5n4HwdAkkDzT41X8aWuD0F9uzM/EMxWTigLbQSILllZtCZbCDC6gqNSULAXx17X8V+2qtrRZZnmn0ys1Ab2ls51xILqevbgR7DWLCE9AM9J4Py7993GnD+XeRrr+uFpZSlo/zsHlVZagJfriZJt9sd+T6VRStlz2cRpweDi9Aa2ALTIEiZEVRZH8SZiCUagHpFrTNFbTnVXPLmnyHR4g8ewVpwuvP/9Dqw80PZaIPQI/uLHHZfS/jpYxWAynjoi4XRn/rPp/SbQqWbznfiKblot32XPRiI4TMMzWQi9Mj4QwpGP/mA5x1zrL19McliMd/CO1XAAAA//8qgsmcfgMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 
30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n# copy files for kubectl\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "snippets": null, - "strict": false - }, - "sensitive_attributes": [], - "identity_schema_version": 0 - } - ] - }, - { - "mode": "data", - "type": "ct_config", - "name": "worker1_ignition", - "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n worker1\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::101/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:3::1\n Address=10.0.2.101/24\n Gateway=10.0.2.3\n DNS=10.0.3.1\n Domains=undercloud.local\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes controle plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n nodeRegistration:\n name: worker1\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::101\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8aux.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::a\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n 
StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=kubeadm.service\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n", - "id": "701272205", - "pretty_print": true, - "rendered": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker1%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrQwNDfTMTLvfEktTyxEoUKWMuF79ghIixlZUh3AhDAz0DPSM9kG4jhG6oKEQjmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//u2Lr160AAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRQW/bMAyF7/4Vgu627CRoC96yppcNGIZl6GXYQZFoh7AjGpTkpvv1g+0AW3YtdBHER/LpfXakV5RIHED1+YTWX6r+KVbEZmpOmOy26Cl4UJ+ZwjOHlrosNhGHIrDH79hRTOsDFEoFe0FQbyw9SlMo5YSO7HpMoHKgKxhjJAfjOCRLAcX/c60iu75Qi40B08s1id1LF+e5Ss3bShpB6dbXNcxnA9DUjV7KEw/5guU45I5C6UlAacNjMgOd8IrOzFMlYMJobgtu4mjWXrPIdOEpOp5Q3ue9J+Y0/2/8wT2G1Ykd6YgyobwEPzKFBEr/vDdV/4KH3W67WktLq9L91DVnV6Wt4BtPMsbd5kG4XUU5RNvisafxef+KQi25NVSVJGNRlmXxP6oBU+UWIvfEmhuxL6vmHpr1XjBGUBpAF2e0Qzr//kTB7+8KbsgxoRz4Ymnx/mTztcrBo7iBs68Gdnb4q/t6nLMpld7UdQO7xxoeN20NLTQAVheuE87jQWjCGUx8jwkvXqtijf7bAuLwAWh/AgAA//+YntpNxwIAAA==\",\n 
\"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "snippets": null, - "strict": false - }, - "sensitive_attributes": [], - "identity_schema_version": 0 - } - ] - }, - { - "mode": "data", - "type": "ct_config", - "name": "worker2_ignition", - "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - 
\"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n worker2\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::102/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:3::1\n Address=10.0.2.102/24\n Gateway=10.0.2.3\n DNS=10.0.3.1\n Domains=undercloud.local\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes controle plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n nodeRegistration:\n name: worker2\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::102\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8aux.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::a\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n 
StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=kubeadm.service\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n", - "id": "43186052", - "pretty_print": true, - "rendered": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker2%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrQwMjfTMTLvfEktTyxEoUKWMuF79ghIixlZUh3AhDAz0DPSM9kG4jhG6oKEQjmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//qDlrN60AAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRQW/bPgzF7/4Ugu62bDdoC97yb3r5DxiGZehl2EGRaIewIxqU5Kb79IPtAFt2HXQRxEfy6f3sRG8okTiAGvIJrb9Uw3OsiM3cnDDZh2Kg4EH9zxReOHTUZ7GJOBSBPX7FnmLaHqBQKtgLgnpnGVDaQikndGQ3YAKVA13BGCM5GMchWQoo/o9rFdkNhVptjJher0nsXvq4zFVq2VbSBEp3vq5hOS1AU7d6Lc885guW05h7CqUnAaUNT8mMdMIrOrNMlYAJo7ktuImj2XrNKtOFp+h4RvlY9p6Y0/K/6RsPGDYndqIjyozyGvzEFBIo/f3eVP0DHne7h81aWluVHua+ObsqPQi+8yxT3LWPwt0myiHaDo8DTS/7NxTqyG2hqiQZi7Isi79RjZgqtxK5J9bciH3aNPfQrPeCMYLSALo4ox3T+ed/FPz+ruDGHBPKgS+WVu/PNl+rHDyKGzn7amRnx9+6z8clm1Lptq4b2D3V8NR2NXTQAFhduF44TwehGRcw8SMmvHitii36LyuIwz9A+xUAAP//0WjE18cCAAA=\",\n 
\"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "snippets": null, - "strict": false - }, - "sensitive_attributes": [], - "identity_schema_version": 0 - } - ] - }, - { - "mode": "data", - "type": "ct_config", - "name": "worker3_ignition", - "provider": "provider[\"registry.terraform.io/poseidon/ct\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content": "variant: flatcar\nversion: 1.1.0\n\npasswd:\n users:\n - name: core\n ssh_authorized_keys:\n - 
\"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n\nstorage:\n directories:\n - path: /opt/bin\n overwrite: true\n mode: 0755\n - path: /opt/cni/bin\n overwrite: true\n mode: 0755\n files:\n - path: /etc/hostname\n mode: 0644\n contents:\n inline: |\n worker3\n\n - path: /etc/systemd/network/00-eth.network\n mode: 0644\n contents:\n inline: |\n [Match]\n Name=eth*\n \n [Network]\n Address=fd00:0:0:2::103/64\n Gateway=fd00:0:0:2::3\n DNS=fd00:0:0:3::1\n Address=10.0.2.103/24\n Gateway=10.0.2.3\n DNS=10.0.3.1\n Domains=undercloud.local\n\n - path: /etc/hosts\n mode: 0644\n overwrite: true\n contents:\n inline: |\n 127.0.0.1 localhost\n ::1 localhost ip6-localhost ip6-loopback\n fd00:0:0:2::91 control-plane1.undercloud.local control-plane1\n fd00:0:0:2::92 control-plane2.undercloud.local control-plane2\n fd00:0:0:2::93 control-plane3.undercloud.local control-plane3\n fd00:0:0:2::101 worker1.undercloud.local worker1\n fd00:0:0:2::102 worker2.undercloud.local worker2\n fd00:0:0:2::103 worker3.undercloud.local worker3\n 10.0.2.91 control-plane1.undercloud.local control-plane1\n 10.0.2.92 control-plane2.undercloud.local control-plane2\n 10.0.2.93 control-plane3.undercloud.local control-plane3\n 10.0.2.101 worker1.undercloud.local worker1\n 10.0.2.102 worker2.undercloud.local worker2\n 10.0.2.103 worker3.undercloud.local worker3\n\n - path: /etc/motd\n mode: 0644\n overwrite: true\n contents:\n inline: |\n *******************************************************************\n * AUTHORIZED ACCESS ONLY *\n * *\n * This system is part of a secured infrastructure. *\n * All activities are monitored and logged. *\n * Unauthorized access or misuse is strictly prohibited and *\n * may result in disciplinary and legal action. 
*\n *******************************************************************\n\n --------------------------------------------------------------------------------\n kubernetes control plane Node\n\n Manage via:\n kubectl (kubectl)\n calico (calicoctl)\n velero - backup (velero)\n argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/\n --------------------------------------------------------------------------------\n\n - path: /etc/sysctl.d/99-k8s.conf\n mode: 0644\n contents:\n inline: |\n net.ipv4.ip_forward = 1\n net.ipv6.ip_forward = 1\n net.ipv6.conf.all.forwarding = 1\n net.ipv4.conf.all.forwarding = 1\n net.bridge.bridge-nf-call-iptables = 1\n net.bridge.bridge-nf-call-ip6tables = 1\n net.netfilter.nf_conntrack_max = 1000000\n net.ipv4.conf.all.rp_filter = 0\n net.ipv6.conf.all.disable_ipv6 = 0\n vm.overcommit_memory = 1\n fs.inotify.max_user_watches = 524288\n fs.inotify.max_user_instances = 512\n kernel.panic = 10\n kernel.panic_on_oops = 1\n\n - path: /etc/flatcar/update.conf\n overwrite: true\n mode: 0420\n contents:\n inline: |\n REBOOT_STRATEGY=off\n\n - path: /opt/bin/kubeadm\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\"\n\n - path: /opt/bin/kubelet\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\"\n\n - path: /opt/bin/kubectl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\"\n\n - path: /opt/bin/calicoctl\n mode: 0755\n contents:\n source: \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\"\n\n - path: /etc/kubernetes/kubeadm-join.yaml\n mode: 0644\n contents:\n inline: |\n apiVersion: kubeadm.k8s.io/v1beta3\n kind: JoinConfiguration\n nodeRegistration:\n name: worker3\n criSocket: unix:///run/containerd/containerd.sock\n kubeletExtraArgs:\n node-ip: \"fd00:0:0:2::103\"\n volume-plugin-dir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n discovery:\n bootstrapToken:\n apiServerEndpoint: \"[fd00:0:0:2::100]:6443\"\n token: \"kvg1hc.t3rewovrps426rof\"\n unsafeSkipCAVerification: true\n ---\n apiVersion: kubelet.config.k8s.io/v1beta1\n kind: KubeletConfiguration\n address: \"::\"\n healthzBindAddress: \"::\"\n clusterDomain: \"k8aux.undercloud.local\"\n clusterDNS:\n - \"2001:470:72f0:f:1::a\"\n cgroupDriver: \"systemd\" \n volumePluginDir: \"/opt/libexec/kubernetes/kubelet-plugins/volume/exec/\"\n\nsystemd:\n units:\n - name: modules-load.service\n enabled: true\n contents: |\n [Unit]\n Description=Load necessary kernel modules\n Before=containerd.service kubeadm-init.service\n\n [Service]\n Type=oneshot\n ExecStart=/usr/bin/modprobe br_netfilter\n ExecStart=/usr/bin/modprobe overlay\n RemainAfterExit=yes\n\n [Install]\n WantedBy=multi-user.target\n\n - name: systemd-networkd-wait-online.service\n enabled: true\n\n - name: containerd.service\n enabled: true\n contents: |\n [Unit]\n Description=containerd container runtime\n After=network.target modules-load.service\n\n [Service]\n ExecStart=/usr/bin/containerd\n Restart=always\n RestartSec=5\n Delegate=yes\n KillMode=process\n OOMScoreAdjust=-999\n\n [Install]\n WantedBy=multi-user.target\n\n - name: set-timezone.service\n enabled: true\n contents: |\n [Unit]\n Description=Set Timezone\n After=network-online.target\n Wants=network-online.target\n [Service]\n 
StandardOutput=journal+console\n StandardError=journal+console\n Type=oneshot\n Restart=on-failure\n ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\n ExecStart=/usr/bin/timedatectl set-ntp true \n [Install]\n WantedBy=kubeadm.service\n\n - name: kubelet.service\n enabled: true\n contents: |\n [Unit]\n Description=kubelet, the Kubernetes Node Agent\n Documentation=https://kubernetes.io/docs/home\n Wants=network-online.target\n After=network-online.target\n [Service]\n #StandardOutput=journal+console\n #StandardError=journal+console\n #EnvironmentFile=/run/metadata/coreos\n Environment=\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\"\n Environment=\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\"\n # This is a file that \"kubeadm init\" and \"kubeadm join\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\n EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\n ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\n Restart=always\n StartLimitInterval=0\n RestartSec=10\n [Install]\n WantedBy=multi-user.target\n\n - name: kubeadm-join.service\n enabled: true\n contents: |\n [Unit]\n Description=Join node to Kubernetes cluster\n After=network-online.target containerd.service kubelet.service\n Wants=network-online.target\n\n [Service]\n Type=oneshot\n # Environment\n Environment=KUBECONFIG=/etc/kubernetes/admin.conf\n Environment=DATASTORE_TYPE=kubernetes\n Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\n \n ExecStartPre=/bin/sleep 30s\n\n ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\n \n #ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\n Restart=on-failure\n RestartSec=120s\n\n [Install]\n WantedBy=multi-user.target\n", - "id": "1150707983", - "pretty_print": true, - "rendered": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker3%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrQwNjfTMTLvfEktTyxEoUKWMuF79ghIixlZUh3AhDAz0DPSM9kG4jhG6oKEQjmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//WQ/raK0AAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRQW/bMAyF7/4Vgu627CRoC96yppcNGIZl6GXYQZFoh7AjGpTkpvv1g+0AW3YtdBHER/LpfXakV5RIHED1+YTWX6r+KVbEZmpOmOy26Cl4UJ+ZwjOHlrosNhGHIrDH79hRTOsDFEoFe0FQbyw9yrZQygkd2fWYQOVAVzDGSA7GcUiWAor/51pFdn2hFhsDppdrEruXLs5zlZq3lTSC0q2va5jPBqCpt3opTzzkC5bjkDsKpScBpQ2PyQx0wis6M0+VgAmjuS24iaNZe80i04Wn6HhCeZ/3npjT/L/xB/cYVid2pCPKhPIS/MgUEij9895U/QsedrubtbS0Kt1PXXN2VdoKvvEkY9xtHoTbVZRDtC0eexqf968o1JJbQ1VJMhZlWRb/oxowVW4hck+suRH7smruoVnvBWMEpQF0cUY7pPPvTxT8/q7ghhwTyoEvlhbvTzZfqxw8ihs4+2pgZ4e/uq/HOZtS6U1dN7B7rOFx09bQQgNgdeE64TwehCacwcT3mPDitSrW6L8tIA4fgPYnAAD//ynHHhfHAgAA\",\n 
\"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "snippets": null, - "strict": false - }, - "sensitive_attributes": [], - "identity_schema_version": 0 - } - ] - }, - { - "mode": "managed", - "type": "null_resource", - "name": "wait_for_cp1", - "provider": "provider[\"registry.terraform.io/hashicorp/null\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "id": "1931730887386844189", - "triggers": null - }, - "sensitive_attributes": [], - 
"identity_schema_version": 0, - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_vm.control_plane1", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - }, - { - "mode": "managed", - "type": "null_resource", - "name": "wait_for_cp3", - "provider": "provider[\"registry.terraform.io/hashicorp/null\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "id": "2521654432915739147", - "triggers": null - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "data.ct_config.control_plane3_ignition", - "null_resource.wait_for_cp1", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_file.control_plane3_ignition", - "proxmox_virtual_environment_vm.control_plane1", - "proxmox_virtual_environment_vm.control_plane3", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_download_file", - "name": "flatcar_image", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "checksum": null, - "checksum_algorithm": null, - "content_type": "import", - "datastore_id": "cephfs", - "decompression_algorithm": null, - "file_name": "flatcar_production_proxmoxve_image.qcow2", - "id": "cephfs:import/flatcar_production_proxmoxve_image.qcow2", - "node_name": "hyper1", - "overwrite": true, - "overwrite_unmanaged": false, - "size": 573243392, - "upload_timeout": 600, - "url": "http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/iso/flatcar_production_proxmoxve_image.img", - "verify": true - }, - "sensitive_attributes": [], - "identity_schema_version": 0 - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_file", - "name": "control_plane1_ignition", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content_type": "snippets", - "datastore_id": "cephfs", - "file_mode": null, - "file_modification_date": null, - "file_name": "control-plane1-ignition-user-data", - "file_size": null, - "file_tag": null, - "id": "cephfs:snippets/control-plane1-ignition-user-data", - "node_name": "hyper1", - "overwrite": true, - "source_file": [], - "source_raw": [ - { - "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 755\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/manifests\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/install-calico\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n 
\"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane1%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrS0N9MxMu98SS1PLEShQZYy4Xv2CEiLGVlSHcBEMDPQM9Iz1LQ30jhGaoIEQfmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//szh4MqsAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n 
\"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/install-calico/custom-resources.yaml\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/custom-resources.yaml\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-init.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6xSQWvbMBi961cIXXqZLTtW41S3rO2hlEFYxy5jB1n67AorkpFkN92vH7KTrGGFjTECQeD33vfe9z4x6K/gg3aW435sQKh93m9Crh2dygaiqFCvreL4wep462yru9GLqJ1FjXMxRC+GL64HGzjCOMMxvTkm/dSVzzKPlYcXN/khsNXau5YgjDFWEKTXQ5ynEgWtGE08jcdn3UVsocRoOCYFQdYp+AydTohZAGFsxR44ls5G70w2GGGhRBhLr5+c7CFyPFp94JRSP1qacEJb8OrNMw9O9gjPJgzE+0P0Yuu7ORXGaWimB45Jq4qCp9+K85ty8TY5M+4hG8zYaZsp7Tkm1A2RGt3AASRNot5ChECP+kdwoAuXzjCCjJPCbHcP91YNTtuYpgs1gY86wFYpDyG8a6LRVu2cjxyvGauQTIxWSxHhEV4TA6CWFVzLVgpZA6trJtfQSlGwNRNspTY3dVsVbV0VRcUKtb6uN821bBsmKlWxhqAsy9BfH8utGUMEf3kvx352qZ5zQEy+vU1TFsV3niIQZCG+ON9r26UtDE49jY1NXf6KLzina/ahLPIiT390xdIyAvhJS/gd3/KSc1oWm4VyU54pyoY7txd6Pt1NyEerwEvjRpXPnZCTewP+k7CiA59cweWZtAYO2f+7hiSZis3Eqfkrzq/ebcJAzOW87ctCymMhjwvmspCzLOGcoGcQJj7/+Kit2l58kEuZf9jPYn03B7lLof8hM5Kdd+Nw5/UEaW3hNUTYK4J+BgAA///wuZ+zpgQAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/calico.yaml\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calico.yaml\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\n\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"setting timezone to Europe/Berlin\\\"'\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node 
Agent\\nDocumentation=https://kubernetes.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Kubeadm Init Cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\nConditionPathExists=!/etc/kubernetes/kubelet.conf\\n\\n[Service]\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"kubeadm-init.service started...\\\"'\\n\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"running kubeadm init...\\\"'\\nExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml\\n\\n# copy files for kubectl\\nExecStart=/bin/sh -c 'echo \\\"copying files (admin.conf) to core home folder.\\\"'\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\nExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-init.service\"\n },\n {\n \"contents\": \"[Unit]\\nWants=kubeadm-init.service\\nAfter=kubeadm-init.service\\n\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nExecStart=/bin/sh -c 'echo \\\"install-calico.service started...\\\"'\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\nStandardOutput=journal+console\\nStandardError=journal+console\\nExecStart=/bin/sh -c 'echo \\\"waiting 30s...\\\"'\\nExecStart=/bin/sleep 30s\\nExecStart=/bin/sh -c 'echo \\\"create calico namespace...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml\\nExecStart=/bin/sh -c 'echo \\\"install tigera operator...\\\"'\\nExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/operator-crds.yaml\\nExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml\\nExecStart=/bin/sh -c 'echo \\\"waiting 60s...\\\"'\\nExecStart=/bin/sleep 60s\\nExecStart=/bin/sh -c 'echo \\\"waiting for tigera 
operator... (20min max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"create calico custom resources...\\\"'\\nExecStart=/opt/bin/kubectl create -f /etc/install-calico/custom-resources.yaml\\n\\nExecStart=/bin/sh -c 'echo \\\"waiting 3m..\\\"'\\nExecStart=/bin/sleep 3m\\n#ExecStart=/bin/sh -c 'echo \\\"apply calico (calico-apiserver)...\\\"'\\n#ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml\\n#ExecStart=/bin/sh -c 'echo \\\"waiting 1m...\\\"'\\n#ExecStart=/bin/sleep 1m\\nExecStart=/bin/sh -c 'echo \\\"waiting for calico-apiserver... (20min max)\\\"'\\nExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=1200s\\nExecStart=/bin/sh -c 'echo \\\"waiting 120s...\\\"'\\nExecStart=/bin/sleep 2m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-peers...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml\\nExecStart=/bin/sh -c 'echo \\\"waiting 60s...\\\"'\\nExecStart=/bin/sleep 1m\\nExecStart=/bin/sh -c 'echo \\\"apply calico-ippools...\\\"'\\nExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml\\n\\n#ExecStart=/bin/sh -c 'echo \\\"waiting for whisker..\\\"'\\n#ExecStart=/opt/bin/kubectl wait deployment -n calico-system whisker --for condition=Available=True --timeout=1200s\\n#ExecStart=/bin/sh -c 'echo \\\"port-forward -n calico-system service/whisker 8081:8081\\\"'\\n#ExecStart=/opt/bin/kubectl port-forward -n calico-system service/whisker 8081:8081\\n\\nExecStart=/usr/bin/systemctl disable install-calico.service\\n#RemainAfterExit=true\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"install-calico.service\"\n },\n {\n \"contents\": \"[Unit]\\nWants=install-calico.service\\nAfter=install-calico.service\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\n\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin\\nType=oneshot\\n\\nExecStart=/opt/bin/kubectl wait deployment -n kube-system coredns --for condition=Available=True --timeout=600s\\n\\nExecStart=/bin/sleep 1m\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/namespace.yaml\\nExecStart=/opt/bin/kubectl apply -n argocd -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/argocd/install.yaml\\nExecStart=/opt/bin/kubectl wait deployment -n argocd argocd-server --for condition=Available=True --timeout=600s\\n\\n#ExecStart=/bin/sleep 10s\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-bootstrap.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/repos/k8aux-apps.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/argocd.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f 
http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/calico.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/rook-ceph.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/apps/gitea.yaml\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://aux-balancer.undercloud.cf:3000/undercloud/k8aux-bootstrap/raw/branch/main/argocd/argocd-secret.yaml\\n##ExecStart=/bin/sleep 10m\\n#ExecStart=/opt/bin/kubectl wait deployment -n gitea gitea --for condition=Available=True --timeout=4800s\\n#ExecStart=/bin/sleep 10m\\n#ExecStart=/opt/bin/kubectl apply -n argocd -f http://gitea.gitea.svc.k8aux.undercloud.cf:3000/undercloud/k8aux-apps/raw/branch/main/app-of-apps/app-of-apps.yaml\\n\\nExecStart=/usr/bin/systemctl disable install-argocd.service\\nRestart=on-failure\\nRestartSec=120s\\n[Install]\\nWantedBy=multi-user.target\",\n \"enabled\": true,\n \"name\": \"install-argocd.service\"\n }\n ]\n }\n}", - "file_name": "control-plane1-ignition-user-data", - "resize": 0 - } - ], - "timeout_upload": 1800 - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_file", - "name": "control_plane2_ignition", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content_type": "snippets", - "datastore_id": "cephfs", - "file_mode": null, - "file_modification_date": null, - "file_name": "control-plane2-ignition-user-data", - "file_size": null, - "file_tag": null, - "id": "cephfs:snippets/control-plane2-ignition-user-data", - "node_name": "hyper1", - "overwrite": true, - "source_file": [], - "source_raw": [ - { - "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane2%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrSyN9MxMu98SS1PLEShQZYy4Xv2CEiLGVlSHcBEMDPQM9Iz1LI30jhGaoIEQfmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//eq4bWKsAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6SST2+cPBDG73wKizvgBbJsfNs3yeFtpGrVrXKpejD2sBnhtdHYppt++sqwSrOVeqq4IHjmzzPPT074AuTRWcHG2IPU53Lc+RJdNW96CLLJRrRasE8O7YOzA54iyYDOZsrZQM4cjLQgMsaMU9LsD/8/WT05tCF9Y0zqGSigh73WBN4Llg+ac5GeWoj7Ol9kPVp9cBQE27ZtkzGmUtWASgZ4hrdUBdCpBu7UoKTqoO26Vm1hUJK321a2td7dd0PDh67hvGm53t51u/5ODX0rG920fZ5Zp+ELnNCH1UFa0MozCHa1UkzJS52mEx6dGiEIFi1eRFVVFG2VdBItkP7wWnqnxowt5zMQni6B5J5OfvWfhhY4/cX27Ew8QzGZeEJbaCTB8spNoTLYwwVUlZqShQC+uva/in211laLLM80euVmoLc0tncuJJfTVzeCvQYx4RFoBnrPh+XfPu604fy7SNdfVwtLKcvH+bR5VWVoCH64mSbf1ltywyqK1ssBjiNOD/sXoDWwBaZAEbKiKLI/CTMQSrWAdAva5gra86q5ZU2+wyNEnr2CNOH1539o9f7mhzLRB6BHd5a47L6T8VJGq4GUcVGXC6O/dZ+P6TYFy2vON6LtuOjqgYtBbISQeaZO5OL0SDhDCsa/+QBnnbNsPf1hCeLxH0L7FQAA///TJ/QufgMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 
30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n# copy files for kubectl\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "file_name": "control-plane2-ignition-user-data", - "resize": 0 - } - ], - "timeout_upload": 1800 - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane2_ignition" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_file", - "name": "control_plane3_ignition", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content_type": "snippets", - "datastore_id": "cephfs", - "file_mode": null, - "file_modification_date": null, - "file_name": "control-plane3-ignition-user-data", - "file_size": null, - "file_tag": null, - "id": "cephfs:snippets/control-plane3-ignition-user-data", - "node_name": "hyper1", - "overwrite": true, - "source_file": [], - "source_raw": [ - { - "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,control-plane3%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrS2N9MxMu98SS1PLEShQZYy4Xv2CEiLGVlSHcBEMDPQM9Iz1LY30jhGaoIEQfmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//Pdw6fqsAAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SST2+cPBDG73wKizvgXciy8W3fJIe3kapVt8ql6sHYAxnhtdHYppt++sqwSrOVeqq4IHjmzzPPT074AuTRWcHG2IHU53Lc+xJdNW86CLLORrRasE8O7YOzPQ6RZEBnM+VsIGeORloQGWPGKWkOx/+frJ4c2pC+MSb1DBTQw0FrAu8Fy3vNuUjPVoj7Ol9kHVp9dBQE2zVNnTGmUlWPSgZ4hrdUBdCqGu5Ur6RqoWnbRu2gV5I3u0Y2W72/b/ua923Ned1wvbtr992d6rtG1rpuujyzTsMXGNCH1UFa0MozCHa1UkzJyzKd8OTUCEGwaPEiqqqiaKukk2iB9IfX0js1Zmw5n4HwdAkkDzT41X8aWuD0F9uzM/EMxWTigLbQSILllZtCZbCDC6gqNSULAXx17X8V+2qtrRZZnmn0ys1Ab2ls51xILqevbgR7DWLCE9AM9J4Py7993GnD+XeRrr+uFpZSlo/zsHlVZagJfriZJt9sd+T6VRStlz2cRpweDi9Aa2ALTIEiZEVRZH8SZiCUagHpFrTNFbTnVXPLmnyHR4g8ewVpwuvP/9Dqw80PZaIPQI/uLHHZfS/jpYxWAynjoi4XRn/rPp/SbQqWbznfiKblot32XPRiI4TMMzWQi9Mj4QwpGP/mA5x1zrL19McliMd/CO1XAAAA//8qgsmcfgMAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe 
overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernetes.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n# copy files for kubectl\\nExecStartPost=/usr/bin/mkdir -p /home/core/.kube\\nExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config\\nExecStartPost=/usr/bin/chown core:core /home/core/.kube/config\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "file_name": "control-plane3-ignition-user-data", - "resize": 0 - } - ], - "timeout_upload": 1800 - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane3_ignition" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_file", - "name": "worker1_ignition", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content_type": 
"snippets", - "datastore_id": "cephfs", - "file_mode": null, - "file_modification_date": null, - "file_name": "worker1-ignition-user-data", - "file_size": null, - "file_tag": null, - "id": "cephfs:snippets/worker1-ignition-user-data", - "node_name": "hyper1", - "overwrite": true, - "source_file": [], - "source_raw": [ - { - "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker1%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrQwNDfTMTLvfEktTyxEoUKWMuF79ghIixlZUh3AhDAz0DPSM9kG4jhG6oKEQjmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//u2Lr160AAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n 
\"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRQW/bMAyF7/4Vgu627CRoC96yppcNGIZl6GXYQZFoh7AjGpTkpvv1g+0AW3YtdBHER/LpfXakV5RIHED1+YTWX6r+KVbEZmpOmOy26Cl4UJ+ZwjOHlrosNhGHIrDH79hRTOsDFEoFe0FQbyw9SlMo5YSO7HpMoHKgKxhjJAfjOCRLAcX/c60iu75Qi40B08s1id1LF+e5Ss3bShpB6dbXNcxnA9DUjV7KEw/5guU45I5C6UlAacNjMgOd8IrOzFMlYMJobgtu4mjWXrPIdOEpOp5Q3ue9J+Y0/2/8wT2G1Ykd6YgyobwEPzKFBEr/vDdV/4KH3W67WktLq9L91DVnV6Wt4BtPMsbd5kG4XUU5RNvisafxef+KQi25NVSVJGNRlmXxP6oBU+UWIvfEmhuxL6vmHpr1XjBGUBpAF2e0Qzr//kTB7+8KbsgxoRz4Ymnx/mTztcrBo7iBs68Gdnb4q/t6nLMpld7UdQO7xxoeN20NLTQAVheuE87jQWjCGUx8jwkvXqtijf7bAuLwAWh/AgAA//+YntpNxwIAAA==\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf 
--kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "file_name": "worker1-ignition-user-data", - "resize": 0 - } - ], - "timeout_upload": 1800 - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.worker1_ignition" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_file", - "name": "worker2_ignition", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content_type": "snippets", - "datastore_id": "cephfs", - "file_mode": null, - "file_modification_date": null, - "file_name": "worker2-ignition-user-data", - "file_size": null, - "file_tag": null, - "id": "cephfs:snippets/worker2-ignition-user-data", - "node_name": "hyper1", - "overwrite": true, - "source_file": [], - "source_raw": [ - { - "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker2%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrQwMjfTMTLvfEktTyxEoUKWMuF79ghIixlZUh3AhDAz0DPSM9kG4jhG6oKEQjmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//qDlrN60AAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n 
\"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": 
\"data:;base64,H4sIAAAAAAAC/6SRQW/bPgzF7/4Ugu62bDdoC97yb3r5DxiGZehl2EGRaIewIxqU5Kb79IPtAFt2HXQRxEfy6f3sRG8okTiAGvIJrb9Uw3OsiM3cnDDZh2Kg4EH9zxReOHTUZ7GJOBSBPX7FnmLaHqBQKtgLgnpnGVDaQikndGQ3YAKVA13BGCM5GMchWQoo/o9rFdkNhVptjJher0nsXvq4zFVq2VbSBEp3vq5hOS1AU7d6Lc885guW05h7CqUnAaUNT8mMdMIrOrNMlYAJo7ktuImj2XrNKtOFp+h4RvlY9p6Y0/K/6RsPGDYndqIjyozyGvzEFBIo/f3eVP0DHne7h81aWluVHua+ObsqPQi+8yxT3LWPwt0myiHaDo8DTS/7NxTqyG2hqiQZi7Isi79RjZgqtxK5J9bciH3aNPfQrPeCMYLSALo4ox3T+ed/FPz+ruDGHBPKgS+WVu/PNl+rHDyKGzn7amRnx9+6z8clm1Lptq4b2D3V8NR2NXTQAFhduF44TwehGRcw8SMmvHitii36LyuIwz9A+xUAAP//0WjE18cCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable 
kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "file_name": "worker2-ignition-user-data", - "resize": 0 - } - ], - "timeout_upload": 1800 - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.worker2_ignition" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_file", - "name": "worker3_ignition", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content_type": "snippets", - "datastore_id": "cephfs", - "file_mode": null, - "file_modification_date": null, - "file_name": "worker3-ignition-user-data", - "file_size": null, - "file_tag": null, - "id": "cephfs:snippets/worker3-ignition-user-data", - "node_name": "hyper1", - "overwrite": true, - "source_file": [], - "source_raw": [ - { - "data": "{\n \"ignition\": {\n \"config\": {\n \"replace\": {\n \"verification\": {}\n }\n },\n \"proxy\": {},\n \"security\": {\n \"tls\": {}\n },\n \"timeouts\": {},\n \"version\": \"3.4.0\"\n },\n \"kernelArguments\": {},\n \"passwd\": {\n \"users\": [\n {\n \"name\": \"core\",\n \"sshAuthorizedKeys\": [\n \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHEAlPo3v4U67Y3411pTjIMkQxwlFWdXrBJkSzXenDH flatcar@undercloud\"\n ]\n }\n ]\n },\n \"storage\": {\n \"directories\": [\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/bin\",\n \"user\": {},\n \"mode\": 493\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/opt/cni/bin\",\n \"user\": {},\n \"mode\": 493\n }\n ],\n \"files\": [\n {\n \"group\": {},\n \"path\": \"/etc/hostname\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,worker3%0A\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/systemd/network/00-eth.network\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4r2TSxJzojl8kvMTbVNLcnQ4uKK9kstKc8vyo7lckxJKUotLrZNSzEwsAJBIysrQwNjfTMTLvfEktTyxEoUKWMuF79ghIixlZUh3AhDAz0DPSM9kG4jhG6oKEQjmGOsZ8jlkp+bmJlXbFual5JalJyTX5qil5OfnJjDBQgAAP//WQ/raK0AAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/hosts\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/5TRYcqDMAwG4P+eIhewNAl8H/Y2rjo2LEY6Zdcf3arQMgnTX83bB8obpH9jjTUIAEF8H27yWBvn0vnzHVO4L39tfZLl0vupuQ7WuvSTcx2Cl3mNEtol9POIZpuHMfog22DevspLTWVKiqZSc5myornQaBGeEqcxfnlzDipAeU5ngCrAec5ngBtMCyHTpR382OROqaZ6jTvlmuodZprq0/s7LhPo3R2XGfTeXgEAAP//kbvS+8wCAAA=\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/motd\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6yRwYobMQyG7/MUOmYX4lzL3sJ2oYV2A01yaG+KrMyIeOxBkodOn74kk0MPaSg0/8WWkD4+4+f/T/MMf8l6v/u0+fb5x9tHWL++vm23sHn/8v3G4B3GP+fC2HViYJM59yAGA6pDOQKCMVXlCJKPiuZayatyuMVYpwRILqO4sAEqQ1+yeDnvY46QSttyDPc89hmrd0Xl13mHiM2gKPRi1fhsZq5CniYYtHRyEL+y/2D0OIGy1eQgGaIYyZAko06zBbc4i5Ycbns84G+b5YPTnOqBNbOzAZXsWhLDkDAzvJfITfMVM7YMo+DLZZY8weJ6eWoIk1CBxXxeWiMn1gJLOCCd6gCLufHUoLaFInTug72sVnO5NNaRNcxVsJHC6QPWn6HmyEqp1BjouHr8u38HAAD//04R3qpqAwAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"path\": \"/etc/sysctl.d/99-k8s.conf\",\n \"user\": {},\n \"contents\": 
{\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/4zQX27CMAwG8HdO0QsQASqIF85imdRhFokdOVmB20/duu6PMo3vJVL0i+wvQtVxHnvHGYLaDW3ofufUbVezOzztvEpwGKObNcul4fpn3Nl4uNB8rCWsPca45lzxHKn87w4z/HRCNXCsZE4CeBWphv4KCe/L3M17Gltaho+331pvGq0HLtNQmG6/3JicjmReU+IKiZLao/WLoTgWrRweLuEdXgsZ3LD6l6nu4va7fnc8NjFLqSh+4aduv92trmRC0WUU9t0fmcr/gKACqrm09nwLAAD//0919F4+AgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n },\n {\n \"group\": {},\n \"overwrite\": true,\n \"path\": \"/etc/flatcar/update.conf\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"\",\n \"source\": \"data:,REBOOT_STRATEGY%3Doff%0A\",\n \"verification\": {}\n },\n \"mode\": 272\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubeadm\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubeadm\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubelet\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubelet\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/kubectl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/kubectl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/opt/bin/calicoctl\",\n \"user\": {},\n \"contents\": {\n \"source\": \"http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/k8s-binaries/calicoctl\",\n \"verification\": {}\n },\n \"mode\": 493\n },\n {\n \"group\": {},\n \"path\": \"/etc/kubernetes/kubeadm-join.yaml\",\n \"user\": {},\n \"contents\": {\n \"compression\": \"gzip\",\n \"source\": \"data:;base64,H4sIAAAAAAAC/6SRQW/bMAyF7/4Vgu627CRoC96yppcNGIZl6GXYQZFoh7AjGpTkpvv1g+0AW3YtdBHER/LpfXakV5RIHED1+YTWX6r+KVbEZmpOmOy26Cl4UJ+ZwjOHlrosNhGHIrDH79hRTOsDFEoFe0FQbyw9yrZQygkd2fWYQOVAVzDGSA7GcUiWAor/51pFdn2hFhsDppdrEruXLs5zlZq3lTSC0q2va5jPBqCpt3opTzzkC5bjkDsKpScBpQ2PyQx0wis6M0+VgAmjuS24iaNZe80i04Wn6HhCeZ/3npjT/L/xB/cYVid2pCPKhPIS/MgUEij9895U/QsedrubtbS0Kt1PXXN2VdoKvvEkY9xtHoTbVZRDtC0eexqf968o1JJbQ1VJMhZlWRb/oxowVW4hck+suRH7smruoVnvBWMEpQF0cUY7pPPvTxT8/q7ghhwTyoEvlhbvTzZfqxw8ihs4+2pgZ4e/uq/HOZtS6U1dN7B7rOFx09bQQgNgdeE64TwehCacwcT3mPDitSrW6L8tIA4fgPYnAAD//ynHHhfHAgAA\",\n \"verification\": {}\n },\n \"mode\": 420\n }\n ]\n },\n \"systemd\": {\n \"units\": [\n {\n \"contents\": \"[Unit]\\nDescription=Load necessary kernel modules\\nBefore=containerd.service kubeadm-init.service\\n\\n[Service]\\nType=oneshot\\nExecStart=/usr/bin/modprobe br_netfilter\\nExecStart=/usr/bin/modprobe overlay\\nRemainAfterExit=yes\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"modules-load.service\"\n },\n {\n \"enabled\": true,\n \"name\": \"systemd-networkd-wait-online.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=containerd container runtime\\nAfter=network.target modules-load.service\\n\\n[Service]\\nExecStart=/usr/bin/containerd\\nRestart=always\\nRestartSec=5\\nDelegate=yes\\nKillMode=process\\nOOMScoreAdjust=-999\\n\\n[Install]\\nWantedBy=multi-user.\\n\",\n \"enabled\": true,\n \"name\": \"containerd.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Set 
Timezone\\nAfter=network-online.target\\nWants=network-online.target\\n[Service]\\nStandardOutput=journal+console\\nStandardError=journal+console\\nType=oneshot\\nRestart=on-failure\\nExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin\\nExecStart=/usr/bin/timedatectl set-ntp true \\n[Install]\\nWantedBy=kubeadm.service\\n\",\n \"enabled\": true,\n \"name\": \"set-timezone.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=kubelet, the Kubernetes Node Agent\\nDocumentation=https://kubernets.io/docs/home\\nWants=network-online.target\\nAfter=network-online.target\\n[Service]\\n#StandardOutput=journal+console\\n#StandardError=journal+console\\n#EnvironmentFile=/run/metadata/coreos\\nEnvironment=\\\"KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf\\\"\\nEnvironment=\\\"KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml\\\"\\n# This is a file that \\\"kubeadm init\\\" and \\\"kubeadm join\\\" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically\\nEnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env\\nExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS\\nRestart=always\\nStartLimitInterval=0\\nRestartSec=10\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubelet.service\"\n },\n {\n \"contents\": \"[Unit]\\nDescription=Join node to Kubernetes cluster\\nAfter=network-online.target containerd.service kubelet.service\\nWants=network-online.target\\n\\n[Service]\\nType=oneshot\\n# Environment\\nEnvironment=KUBECONFIG=/etc/kubernetes/admin.conf\\nEnvironment=DATASTORE_TYPE=kubernetes\\nEnvironment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/\\n\\nExecStartPre=/bin/sleep 30s\\n\\nExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml\\n\\n#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service\\nRestart=on-failure\\nRestartSec=120s\\n\\n[Install]\\nWantedBy=multi-user.target\\n\",\n \"enabled\": true,\n \"name\": \"kubeadm-join.service\"\n }\n ]\n }\n}", - "file_name": "worker3-ignition-user-data", - "resize": 0 - } - ], - "timeout_upload": 1800 - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.worker3_ignition" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "control_plane1", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": null, - "cdrom": [], - "clone": [ - { - "datastore_id": "", - "full": true, - "node_name": "hyper1", - "retries": 1, - "vm_id": 103 - } - ], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 2, - "flags": null, - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "kubernetes control-plane1", - "disk": [], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "104", - "initialization": [ - { - "datastore_id": "local-lvm", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "cephfs:snippets/control-plane1-ignition-user-data", - "vendor_data_file_id": 
"" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": null, - "mac_addresses": [ - "BC:24:11:F5:72:1C" - ], - "machine": null, - "memory": [ - { - "dedicated": 4098, - "floating": 4098, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "control-plane1", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:F5:72:1C", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper1", - "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": true, - "startup": [], - "stop_on_destroy": false, - "tablet_device": true, - "tags": [ - "control-plane", - "flatcar", - "kubernetes", - "terraform" - ], - "template": false, - "timeout_clone": 1800, - "timeout_create": 1800, - "timeout_migrate": 1800, - "timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - "virtiofs": [], - "vm_id": 104, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "control_plane2", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": null, - "cdrom": [], - "clone": [ - { - "datastore_id": "", - "full": true, - "node_name": "hyper1", - "retries": 1, - "vm_id": 103 - } - ], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 2, - "flags": null, - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "kubernetes control-plane2", - "disk": [], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "105", - "initialization": [ - { - "datastore_id": "local-lvm", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "cephfs:snippets/control-plane2-ignition-user-data", - "vendor_data_file_id": "" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": null, - "mac_addresses": [ - "BC:24:11:82:79:6A" - ], - "machine": null, - "memory": [ - { - "dedicated": 3072, - "floating": 3072, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "control-plane2", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:82:79:6A", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper2", 
- "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": true, - "startup": [], - "stop_on_destroy": false, - "tablet_device": true, - "tags": [ - "control-plane", - "flatcar", - "kubernetes", - "terraform" - ], - "template": false, - "timeout_clone": 1800, - "timeout_create": 1800, - "timeout_migrate": 1800, - "timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - "virtiofs": [], - "vm_id": 105, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "data.ct_config.control_plane2_ignition", - "null_resource.wait_for_cp1", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_file.control_plane2_ignition", - "proxmox_virtual_environment_vm.control_plane1", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "control_plane3", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": null, - "cdrom": [], - "clone": [ - { - "datastore_id": "", - "full": true, - "node_name": "hyper1", - "retries": 1, - "vm_id": 103 - } - ], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 2, - "flags": null, - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "kubernetes control-plane3", - "disk": [], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "106", - "initialization": [ - { - "datastore_id": "local-lvm", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "cephfs:snippets/control-plane3-ignition-user-data", - "vendor_data_file_id": "" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": null, - "mac_addresses": [ - "BC:24:11:20:21:56" - ], - "machine": null, - "memory": [ - { - "dedicated": 3072, - "floating": 3072, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "control-plane3", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:20:21:56", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper3", - "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": true, - "startup": [], - "stop_on_destroy": false, - "tablet_device": true, - "tags": [ - "control-plane", - "flatcar", - "kubernetes", - "terraform" - ], - "template": false, - "timeout_clone": 1800, - "timeout_create": 1800, - 
"timeout_migrate": 1800, - "timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - "virtiofs": [], - "vm_id": 106, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "data.ct_config.control_plane3_ignition", - "null_resource.wait_for_cp1", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_file.control_plane3_ignition", - "proxmox_virtual_environment_vm.control_plane1", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "flatcar_template", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": [ - "scsi0", - "ide2" - ], - "cdrom": [], - "clone": [], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 1, - "flags": null, - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "managed by terraform - base template for flatcar", - "disk": [ - { - "aio": "io_uring", - "backup": true, - "cache": "none", - "datastore_id": "Pool1", - "discard": "on", - "file_format": "raw", - "file_id": "", - "import_from": "cephfs:import/flatcar_production_proxmoxve_image.qcow2", - "interface": "virtio0", - "iothread": false, - "path_in_datastore": "vm-103-disk-0", - "replicate": true, - "serial": "", - "size": 10, - "speed": [], - "ssd": false - } - ], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "103", - "initialization": [ - { - "datastore_id": "Pool1", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "", - "vendor_data_file_id": "" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": "", - "mac_addresses": [ - "BC:24:11:39:25:01" - ], - "machine": "", - "memory": [ - { - "dedicated": 2048, - "floating": 2048, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "flatcar-template", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:39:25:01", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper1", - "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": null, - "startup": [], - "stop_on_destroy": true, - "tablet_device": true, - "tags": [ - "flatcar", - "kubernetes", - "terraform" - ], - "template": true, - "timeout_clone": 1800, - "timeout_create": 1800, - "timeout_migrate": 1800, - "timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - 
"virtiofs": [], - "vm_id": 103, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "proxmox_virtual_environment_download_file.flatcar_image" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "worker1", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": null, - "cdrom": [], - "clone": [ - { - "datastore_id": "", - "full": true, - "node_name": "hyper1", - "retries": 1, - "vm_id": 103 - } - ], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 2, - "flags": null, - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "kubernetes worker1", - "disk": [], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "109", - "initialization": [ - { - "datastore_id": "local-lvm", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "cephfs:snippets/worker1-ignition-user-data", - "vendor_data_file_id": "" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": null, - "mac_addresses": [ - "BC:24:11:31:50:0B" - ], - "machine": null, - "memory": [ - { - "dedicated": 8192, - "floating": 8192, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "worker1", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:31:50:0B", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper1", - "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": true, - "startup": [], - "stop_on_destroy": false, - "tablet_device": true, - "tags": [ - "flatcar", - "kubernetes", - "terraform", - "worker" - ], - "template": false, - "timeout_clone": 1800, - "timeout_create": 1800, - "timeout_migrate": 1800, - "timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - "virtiofs": [], - "vm_id": 109, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "data.ct_config.control_plane3_ignition", - "data.ct_config.worker1_ignition", - "null_resource.wait_for_cp1", - "null_resource.wait_for_cp3", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_file.control_plane3_ignition", - "proxmox_virtual_environment_file.worker1_ignition", - "proxmox_virtual_environment_vm.control_plane1", - "proxmox_virtual_environment_vm.control_plane3", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "worker2", - "provider": 
"provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": null, - "cdrom": [], - "clone": [ - { - "datastore_id": "", - "full": true, - "node_name": "hyper1", - "retries": 1, - "vm_id": 103 - } - ], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 2, - "flags": null, - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "kubernetes worker2", - "disk": [], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "107", - "initialization": [ - { - "datastore_id": "local-lvm", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "cephfs:snippets/worker2-ignition-user-data", - "vendor_data_file_id": "" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": null, - "mac_addresses": [ - "BC:24:11:C5:AC:58" - ], - "machine": null, - "memory": [ - { - "dedicated": 8192, - "floating": 8192, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "worker2", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:C5:AC:58", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper2", - "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": true, - "startup": [], - "stop_on_destroy": false, - "tablet_device": true, - "tags": [ - "flatcar", - "kubernetes", - "terraform", - "worker" - ], - "template": false, - "timeout_clone": 1800, - "timeout_create": 1800, - "timeout_migrate": 1800, - "timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - "virtiofs": [], - "vm_id": 107, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "data.ct_config.control_plane3_ignition", - "data.ct_config.worker2_ignition", - "null_resource.wait_for_cp1", - "null_resource.wait_for_cp3", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_file.control_plane3_ignition", - "proxmox_virtual_environment_file.worker2_ignition", - "proxmox_virtual_environment_vm.control_plane1", - "proxmox_virtual_environment_vm.control_plane3", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - }, - { - "mode": "managed", - "type": "proxmox_virtual_environment_vm", - "name": "worker3", - "provider": "provider[\"registry.terraform.io/bpg/proxmox\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "acpi": true, - "agent": [], - "amd_sev": [], - "audio_device": [], - "bios": "seabios", - "boot_order": null, - "cdrom": [], - "clone": [ - { - "datastore_id": "", - "full": true, - "node_name": "hyper1", - "retries": 1, - 
"vm_id": 103 - } - ], - "cpu": [ - { - "affinity": "", - "architecture": "", - "cores": 2, - "flags": null, - "hotplugged": 0, - "limit": 0, - "numa": false, - "sockets": 1, - "type": "qemu64", - "units": 1024 - } - ], - "description": "kubernetes worker3", - "disk": [], - "efi_disk": [], - "hook_script_file_id": null, - "hostpci": [], - "id": "108", - "initialization": [ - { - "datastore_id": "local-lvm", - "dns": [], - "interface": "ide2", - "ip_config": [], - "meta_data_file_id": "", - "network_data_file_id": "", - "type": "", - "user_account": [], - "user_data_file_id": "cephfs:snippets/worker3-ignition-user-data", - "vendor_data_file_id": "" - } - ], - "ipv4_addresses": [], - "ipv6_addresses": [], - "keyboard_layout": "en-us", - "kvm_arguments": null, - "mac_addresses": [ - "BC:24:11:87:3C:6A" - ], - "machine": null, - "memory": [ - { - "dedicated": 8192, - "floating": 8192, - "hugepages": "", - "keep_hugepages": false, - "shared": 0 - } - ], - "migrate": false, - "name": "worker3", - "network_device": [ - { - "bridge": "vmbr0", - "disconnected": false, - "enabled": true, - "firewall": false, - "mac_address": "BC:24:11:87:3C:6A", - "model": "virtio", - "mtu": 0, - "queues": 0, - "rate_limit": 0, - "trunks": "", - "vlan_id": 0 - } - ], - "network_interface_names": [], - "node_name": "hyper3", - "numa": [], - "on_boot": true, - "operating_system": [], - "pool_id": null, - "protection": false, - "reboot": false, - "reboot_after_update": true, - "rng": [], - "scsi_hardware": "virtio-scsi-pci", - "serial_device": [], - "smbios": [], - "started": true, - "startup": [], - "stop_on_destroy": false, - "tablet_device": true, - "tags": [ - "flatcar", - "kubernetes", - "terraform", - "worker" - ], - "template": false, - "timeout_clone": 1800, - "timeout_create": 1800, - "timeout_migrate": 1800, - "timeout_move_disk": 1800, - "timeout_reboot": 1800, - "timeout_shutdown_vm": 1800, - "timeout_start_vm": 1800, - "timeout_stop_vm": 300, - "tpm_state": [], - "usb": [], - "vga": [], - "virtiofs": [], - "vm_id": 108, - "watchdog": [] - }, - "sensitive_attributes": [], - "identity_schema_version": 0, - "private": "bnVsbA==", - "dependencies": [ - "data.ct_config.control_plane1_ignition", - "data.ct_config.control_plane3_ignition", - "data.ct_config.worker3_ignition", - "null_resource.wait_for_cp1", - "null_resource.wait_for_cp3", - "proxmox_virtual_environment_download_file.flatcar_image", - "proxmox_virtual_environment_file.control_plane1_ignition", - "proxmox_virtual_environment_file.control_plane3_ignition", - "proxmox_virtual_environment_file.worker3_ignition", - "proxmox_virtual_environment_vm.control_plane1", - "proxmox_virtual_environment_vm.control_plane3", - "proxmox_virtual_environment_vm.flatcar_template" - ] - } - ] - } - ], + "resources": [], "check_results": null } diff --git a/velero.yaml b/velero.yaml new file mode 100644 index 0000000..0518f59 --- /dev/null +++ b/velero.yaml @@ -0,0 +1,3353 @@ +apiVersion: v1 +items: +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: backuprepositories.velero.io + spec: + group: velero.io + names: + kind: BackupRepository + listKind: BackupRepositoryList + plural: backuprepositories + singular: backuprepository + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.repositoryType + name: Repository Type + 
type: string + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupRepositorySpec is the specification for a BackupRepository. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the BackupStorageLocation + that should contain this repository. + type: string + maintenanceFrequency: + description: MaintenanceFrequency is how often maintenance should + be run. + type: string + repositoryType: + description: RepositoryType indicates the type of the backend repository + enum: + - kopia + - restic + - "" + type: string + resticIdentifier: + description: ResticIdentifier is the full restic-compatible string + for identifying this repository. + type: string + volumeNamespace: + description: VolumeNamespace is the namespace this backup repository + contains pod volume backups for. + type: string + required: + - backupStorageLocation + - maintenanceFrequency + - resticIdentifier + - volumeNamespace + type: object + status: + description: BackupRepositoryStatus is the current status of a BackupRepository. + properties: + lastMaintenanceTime: + description: LastMaintenanceTime is the last time maintenance was + run. + format: date-time + nullable: true + type: string + message: + description: Message is a message about the current status of the + BackupRepository. + type: string + phase: + description: Phase is the current state of the BackupRepository. + enum: + - New + - Ready + - NotReady + type: string + type: object + type: object + served: true + storage: true + subresources: {} +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: backups.velero.io + spec: + group: velero.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Backup is a Velero resource that represents the capture of + Kubernetes cluster state at a point in time (API objects and associated + volume state). + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. 
More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupSpec defines the specification for a Velero backup. + properties: + csiSnapshotTimeout: + description: CSISnapshotTimeout specifies how long to wait for the + CSI VolumeSnapshot status to turn to ReadyToUse during creation + before returning a timeout error. The default value is 10 minutes. + type: string + datamover: + description: DataMover specifies the data mover to be used by the + backup. If DataMover is "" or "velero", the built-in data mover + will be used. + type: string + defaultVolumesToFsBackup: + description: DefaultVolumesToFsBackup specifies whether pod volume + file system backup should be used for all volumes by default. + nullable: true + type: boolean + defaultVolumesToRestic: + description: "DefaultVolumesToRestic specifies whether restic should + be used to take a backup of all pod volumes by default. \n Deprecated: + this field is no longer used and will be removed entirely in future. + Use DefaultVolumesToFsBackup instead." + nullable: true + type: boolean + excludedClusterScopedResources: + description: ExcludedClusterScopedResources is a slice of cluster-scoped + resource type names to exclude from the backup. If set to "*", + all cluster-scoped resource types are excluded. The default value + is empty. + items: + type: string + nullable: true + type: array + excludedNamespaceScopedResources: + description: ExcludedNamespaceScopedResources is a slice of namespace-scoped + resource type names to exclude from the backup. If set to "*", + all namespace-scoped resource types are excluded. The default + value is empty. + items: + type: string + nullable: true + type: array + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that + are not included in the backup. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names that + are not included in the backup. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be executed + at different phases of the backup. + properties: + resources: + description: Resources are hooks that should be executed when + backing up individual instances of a resource. + items: + description: BackupResourceHookSpec defines one or more BackupResourceHooks + that should be executed based on the rules defined for namespaces, + resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces + to which this hook spec applies. If empty, it applies + to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources + to which this hook spec applies. If empty, it applies + to all resources. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the + resources to which this hook spec applies.
+ nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: Name is the name of this hook. + type: string + post: + description: PostHooks is a list of BackupResourceHooks + to execute after storing the item in the backup. These + are executed after all "additional items" from item + actions are processed. + items: + description: BackupResourceHook defines a hook for a + resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments + to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the + pod where the command should be executed. + If not specified, the pod's first container + is used. + type: string + onError: + description: OnError specifies how Velero should + behave if it encounters an error executing + this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount + of time Velero should wait for the hook to + complete before considering the execution + a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + pre: + description: PreHooks is a list of BackupResourceHooks + to execute prior to storing the item in the backup. + These are executed before any "additional items" from + item actions are processed. + items: + description: BackupResourceHook defines a hook for a + resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments + to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the + pod where the command should be executed. + If not specified, the pod's first container + is used. + type: string + onError: + description: OnError specifies how Velero should + behave if it encounters an error executing + this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount + of time Velero should wait for the hook to + complete before considering the execution + a failure. 
+ type: string + required: + - command + type: object + required: + - exec + type: object + type: array + required: + - name + type: object + nullable: true + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped + resources should be included for consideration in the backup. + nullable: true + type: boolean + includedClusterScopedResources: + description: IncludedClusterScopedResources is a slice of cluster-scoped + resource type names to include in the backup. If set to "*", all + cluster-scoped resource types are included. The default value + is empty, which means only related cluster-scoped resources are + included. + items: + type: string + nullable: true + type: array + includedNamespaceScopedResources: + description: IncludedNamespaceScopedResources is a slice of namespace-scoped + resource type names to include in the backup. The default value + is "*". + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to + include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names to include + in the backup. If empty, all resources are included. + items: + type: string + nullable: true + type: array + itemOperationTimeout: + description: ItemOperationTimeout specifies the time used to wait + for asynchronous BackupItemAction operations. The default value + is 1 hour. + type: string + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter with + when adding individual objects to the backup. If empty or nil, + all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + metadata: + properties: + labels: + additionalProperties: + type: string + type: object + type: object + orLabelSelectors: + description: OrLabelSelectors is a list of metav1.LabelSelector to + filter with when adding individual objects to the backup. If multiple + selectors are provided, they are joined by the OR operator. LabelSelector + and OrLabelSelectors cannot both be set in a backup request; + only one of them can be used.
+ items: + description: A label selector is a label query over a set of resources. + The results of matchLabels and matchExpressions are ANDed. An + empty label selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + orderedResources: + additionalProperties: + type: string + description: OrderedResources specifies the backup order of resources + of a specific Kind. The map key is the resource name and the value + is a list of object names separated by commas. Each resource name + has the format "namespace/objectname". For cluster resources, simply + use "objectname". + nullable: true + type: object + resourcePolicy: + description: ResourcePolicy specifies the referenced resource policies + that the backup should follow. + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in + the core API group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being referenced. + type: string + name: + description: Name is the name of the resource being referenced. + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + snapshotMoveData: + description: SnapshotMoveData specifies whether snapshot data should + be moved. + nullable: true + type: boolean + snapshotVolumes: + description: SnapshotVolumes specifies whether to take snapshots + of any PVs referenced in the set of objects included in the Backup. + nullable: true + type: boolean + storageLocation: + description: StorageLocation is a string containing the name of + a BackupStorageLocation where the backup should be stored. + type: string + ttl: + description: TTL is a time.Duration-parseable string describing + how long the Backup should be retained for. + type: string + uploaderConfig: + description: UploaderConfig specifies the configuration for the + uploader. + nullable: true + properties: + parallelFilesUpload: + description: ParallelFilesUpload is the number of parallel file + uploads to perform when using the uploader.
+ type: integer + type: object + volumeSnapshotLocations: + description: VolumeSnapshotLocations is a list containing names + of VolumeSnapshotLocations associated with this backup. + items: + type: string + type: array + type: object + status: + description: BackupStatus captures the current status of a Velero backup. + properties: + backupItemOperationsAttempted: + description: BackupItemOperationsAttempted is the total number of + attempted async BackupItemAction operations for this backup. + type: integer + backupItemOperationsCompleted: + description: BackupItemOperationsCompleted is the total number of + successfully completed async BackupItemAction operations for this + backup. + type: integer + backupItemOperationsFailed: + description: BackupItemOperationsFailed is the total number of async + BackupItemAction operations for this backup which ended with an + error. + type: integer + completionTimestamp: + description: CompletionTimestamp records the time a backup was completed. + Completion time is recorded even on failed backups. Completion + time is recorded before uploading the backup object. The server's + time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + csiVolumeSnapshotsAttempted: + description: CSIVolumeSnapshotsAttempted is the total number of + attempted CSI VolumeSnapshots for this backup. + type: integer + csiVolumeSnapshotsCompleted: + description: CSIVolumeSnapshotsCompleted is the total number of + successfully completed CSI VolumeSnapshots for this backup. + type: integer + errors: + description: Errors is a count of all error messages that were generated + during execution of the backup. The actual errors are in the + backup's log file in object storage. + type: integer + expiration: + description: Expiration is when this Backup is eligible for garbage-collection. + format: date-time + nullable: true + type: string + failureReason: + description: FailureReason is an error that caused the entire backup + to fail. + type: string + formatVersion: + description: FormatVersion is the backup format version, including + major, minor, and patch version. + type: string + hookStatus: + description: HookStatus contains information about the status of + the hooks. + nullable: true + properties: + hooksAttempted: + description: HooksAttempted is the total number of attempted + hooks. Specifically, HooksAttempted represents the number of + hooks that failed to execute and the number of hooks that + executed successfully. + type: integer + hooksFailed: + description: HooksFailed is the total number of hooks which + ended with an error + type: integer + type: object + phase: + description: Phase is the current state of the Backup. + enum: + - New + - FailedValidation + - InProgress + - WaitingForPluginOperations + - WaitingForPluginOperationsPartiallyFailed + - Finalizing + - FinalizingPartiallyFailed + - Completed + - PartiallyFailed + - Failed + - Deleting + type: string + progress: + description: Progress contains information about the backup's execution + progress. Note that this information is best-effort only -- if + Velero fails to update it during a backup for any reason, it may + be inaccurate/stale. + nullable: true + properties: + itemsBackedUp: + description: ItemsBackedUp is the number of items that have + actually been written to the backup tarball so far. + type: integer + totalItems: + description: TotalItems is the total number of items to be backed + up.
This number may change throughout the execution of the + backup due to plugins that return additional related items + to back up, the velero.io/exclude-from-backup label, and various + other filters that happen as items are processed. + type: integer + type: object + startTimestamp: + description: StartTimestamp records the time a backup was started. + Separate from CreationTimestamp, since that value changes on restores. + The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + validationErrors: + description: ValidationErrors is a slice of all validation errors + (if applicable). + items: + type: string + nullable: true + type: array + version: + description: 'Version is the backup format major version. Deprecated: + Please see FormatVersion' + type: integer + volumeSnapshotsAttempted: + description: VolumeSnapshotsAttempted is the total number of attempted + volume snapshots for this backup. + type: integer + volumeSnapshotsCompleted: + description: VolumeSnapshotsCompleted is the total number of successfully + completed volume snapshots for this backup. + type: integer + warnings: + description: Warnings is a count of all warning messages that were + generated during execution of the backup. The actual warnings + are in the backup's log file in object storage. + type: integer + type: object + type: object + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: backupstoragelocations.velero.io + spec: + group: velero.io + names: + kind: BackupStorageLocation + listKind: BackupStorageLocationList + plural: backupstoragelocations + shortNames: + - bsl + singular: backupstoragelocation + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Backup Storage Location status such as Available/Unavailable + jsonPath: .status.phase + name: Phase + type: string + - description: LastValidationTime is the last time the backup store location + was validated + jsonPath: .status.lastValidationTime + name: Last Validated + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Default backup storage location + jsonPath: .spec.default + name: Default + type: boolean + name: v1 + schema: + openAPIV3Schema: + description: BackupStorageLocation is a location where Velero stores backup + objects + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupStorageLocationSpec defines the desired state of + a Velero BackupStorageLocation + properties: + accessMode: + description: AccessMode defines the permissions for the backup storage + location. 
+ enum: + - ReadOnly + - ReadWrite + type: string + backupSyncPeriod: + description: BackupSyncPeriod defines how frequently to sync backup + API objects from object storage. A value of 0 disables sync. + nullable: true + type: string + config: + additionalProperties: + type: string + description: Config is for provider-specific configuration fields. + type: object + credential: + description: Credential contains the credential information intended + to be used with this location + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + default: + description: Default indicates this location is the default backup + storage location. + type: boolean + objectStorage: + description: ObjectStorageLocation specifies the settings necessary + to connect to a provider's object storage. + properties: + bucket: + description: Bucket is the bucket to use for object storage. + type: string + caCert: + description: CACert defines a CA bundle to use when verifying + TLS connections to the provider. + format: byte + type: string + prefix: + description: Prefix is the path inside a bucket to use for Velero + storage. Optional. + type: string + required: + - bucket + type: object + provider: + description: Provider is the provider of the backup storage. + type: string + validationFrequency: + description: ValidationFrequency defines how frequently to validate + the corresponding object storage. A value of 0 disables validation. + nullable: true + type: string + required: + - objectStorage + - provider + type: object + status: + description: BackupStorageLocationStatus defines the observed state + of BackupStorageLocation + properties: + accessMode: + description: "AccessMode is an unused field. \n Deprecated: there + is now an AccessMode field on the Spec and this field will be + removed entirely as of v2.0." + enum: + - ReadOnly + - ReadWrite + type: string + lastSyncedRevision: + description: "LastSyncedRevision is the value of the `metadata/revision` + file in the backup storage location the last time the BSL's contents + were synced into the cluster. \n Deprecated: this field is no + longer updated or used for detecting changes to the location's + contents and will be removed entirely in v2.0." + type: string + lastSyncedTime: + description: LastSyncedTime is the last time the contents of the + location were synced into the cluster. + format: date-time + nullable: true + type: string + lastValidationTime: + description: LastValidationTime is the last time the backup store + location was validated against the cluster. + format: date-time + nullable: true + type: string + message: + description: Message is a message about the backup storage location's + status. + type: string + phase: + description: Phase is the current state of the BackupStorageLocation.
+ enum: + - Available + - Unavailable + type: string + type: object + type: object + served: true + storage: true + subresources: {} +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: deletebackuprequests.velero.io + spec: + group: velero.io + names: + kind: DeleteBackupRequest + listKind: DeleteBackupRequestList + plural: deletebackuprequests + singular: deletebackuprequest + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The name of the backup to be deleted + jsonPath: .spec.backupName + name: BackupName + type: string + - description: The status of the deletion request + jsonPath: .status.phase + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: DeleteBackupRequest is a request to delete one or more backups. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DeleteBackupRequestSpec is the specification for which + backups to delete. + properties: + backupName: + type: string + required: + - backupName + type: object + status: + description: DeleteBackupRequestStatus is the current status of a DeleteBackupRequest. + properties: + errors: + description: Errors contains any errors that were encountered during + the deletion process. + items: + type: string + nullable: true + type: array + phase: + description: Phase is the current state of the DeleteBackupRequest. + enum: + - New + - InProgress + - Processed + type: string + type: object + type: object + served: true + storage: true + subresources: {} +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: downloadrequests.velero.io + spec: + group: velero.io + names: + kind: DownloadRequest + listKind: DownloadRequestList + plural: downloadrequests + singular: downloadrequest + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: DownloadRequest is a request to download an artifact from backup + object storage, such as a backup log file. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. 
More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DownloadRequestSpec is the specification for a download + request. + properties: + target: + description: Target is what to download (e.g. logs for a backup). + properties: + kind: + description: Kind is the type of file to download. + enum: + - BackupLog + - BackupContents + - BackupVolumeSnapshots + - BackupItemOperations + - BackupResourceList + - BackupResults + - RestoreLog + - RestoreResults + - RestoreResourceList + - RestoreItemOperations + - CSIBackupVolumeSnapshots + - CSIBackupVolumeSnapshotContents + - BackupVolumeInfos + type: string + name: + description: Name is the name of the Kubernetes resource with + which the file is associated. + type: string + required: + - kind + - name + type: object + required: + - target + type: object + status: + description: DownloadRequestStatus is the current status of a DownloadRequest. + properties: + downloadURL: + description: DownloadURL contains the pre-signed URL for the target + file. + type: string + expiration: + description: Expiration is when this DownloadRequest expires and + can be deleted by the system. + format: date-time + nullable: true + type: string + phase: + description: Phase is the current state of the DownloadRequest. + enum: + - New + - Processed + type: string + type: object + type: object + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: podvolumebackups.velero.io + spec: + group: velero.io + names: + kind: PodVolumeBackup + listKind: PodVolumeBackupList + plural: podvolumebackups + singular: podvolumebackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Pod Volume Backup status such as New/InProgress + jsonPath: .status.phase + name: Status + type: string + - description: Time when this backup was started + jsonPath: .status.startTimestamp + name: Created + type: date + - description: Namespace of the pod containing the volume to be backed up + jsonPath: .spec.pod.namespace + name: Namespace + type: string + - description: Name of the pod containing the volume to be backed up + jsonPath: .spec.pod.name + name: Pod + type: string + - description: Name of the volume to be backed up + jsonPath: .spec.volume + name: Volume + type: string + - description: The type of the uploader to handle data transfer + jsonPath: .spec.uploaderType + name: Uploader Type + type: string + - description: Name of the Backup Storage Location where this backup should + be stored + jsonPath: .spec.backupStorageLocation + name: Storage Location + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. 
More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PodVolumeBackupSpec is the specification for a PodVolumeBackup. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the backup storage + location where the backup repository is stored. + type: string + node: + description: Node is the name of the node that the Pod is running + on. + type: string + pod: + description: Pod is a reference to the pod containing the volume + to be backed up. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + repoIdentifier: + description: RepoIdentifier is the backup repository identifier. + type: string + tags: + additionalProperties: + type: string + description: Tags are a map of key-value pairs that should be applied + to the volume backup as tags. + type: object + uploaderSettings: + additionalProperties: + type: string + description: UploaderSettings are a map of key-value pairs that + should be applied to the uploader configuration. + nullable: true + type: object + uploaderType: + description: UploaderType is the type of the uploader to handle + the data transfer. + enum: + - kopia + - restic + - "" + type: string + volume: + description: Volume is the name of the volume within the Pod to + be backed up. + type: string + required: + - backupStorageLocation + - node + - pod + - repoIdentifier + - volume + type: object + status: + description: PodVolumeBackupStatus is the current status of a PodVolumeBackup. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a backup was completed. + Completion time is recorded even on failed backups. Completion + time is recorded before uploading the backup object. 
The server's + time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + message: + description: Message is a message about the pod volume backup's + status. + type: string + path: + description: Path is the full path within the controller pod being + backed up. + type: string + phase: + description: Phase is the current state of the PodVolumeBackup. + enum: + - New + - InProgress + - Completed + - Failed + type: string + progress: + description: Progress holds the total number of bytes of the volume + and the current number of backed up bytes. This can be used to + display progress information about the backup operation. + properties: + bytesDone: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + type: object + snapshotID: + description: SnapshotID is the identifier for the snapshot of the + pod volume. + type: string + startTimestamp: + description: StartTimestamp records the time a backup was started. + Separate from CreationTimestamp, since that value changes on restores. + The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + served: true + storage: true + subresources: {} +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: podvolumerestores.velero.io + spec: + group: velero.io + names: + kind: PodVolumeRestore + listKind: PodVolumeRestoreList + plural: podvolumerestores + singular: podvolumerestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Namespace of the pod containing the volume to be restored + jsonPath: .spec.pod.namespace + name: Namespace + type: string + - description: Name of the pod containing the volume to be restored + jsonPath: .spec.pod.name + name: Pod + type: string + - description: The type of the uploader to handle data transfer + jsonPath: .spec.uploaderType + name: Uploader Type + type: string + - description: Name of the volume to be restored + jsonPath: .spec.volume + name: Volume + type: string + - description: Pod Volume Restore status such as New/InProgress + jsonPath: .status.phase + name: Status + type: string + - description: Total number of bytes to restore for the pod volume + format: int64 + jsonPath: .status.progress.totalBytes + name: TotalBytes + type: integer + - description: Number of bytes restored so far for the pod volume + format: int64 + jsonPath: .status.progress.bytesDone + name: BytesDone + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PodVolumeRestoreSpec is the specification for a PodVolumeRestore.
+ properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the backup storage + location where the backup repository is stored. + type: string + pod: + description: Pod is a reference to the pod containing the volume + to be restored. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + repoIdentifier: + description: RepoIdentifier is the backup repository identifier. + type: string + snapshotID: + description: SnapshotID is the ID of the volume snapshot to be restored. + type: string + sourceNamespace: + description: SourceNamespace is the original namespace for namespace + mapping. + type: string + uploaderSettings: + additionalProperties: + type: string + description: UploaderSettings are a map of key-value pairs that + should be applied to the uploader configuration. + nullable: true + type: object + uploaderType: + description: UploaderType is the type of the uploader to handle + the data transfer. + enum: + - kopia + - restic + - "" + type: string + volume: + description: Volume is the name of the volume within the Pod to + be restored. + type: string + required: + - backupStorageLocation + - pod + - repoIdentifier + - snapshotID + - sourceNamespace + - volume + type: object + status: + description: PodVolumeRestoreStatus is the current status of a PodVolumeRestore. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a restore was + completed. Completion time is recorded even on failed restores. + The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + message: + description: Message is a message about the pod volume restore's + status. + type: string + phase: + description: Phase is the current state of the PodVolumeRestore.
+ enum: + - New + - InProgress + - Completed + - Failed + type: string + progress: + description: Progress holds the total number of bytes of the snapshot + and the current number of restored bytes. This can be used to + display progress information about the restore operation. + properties: + bytesDone: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + type: object + startTimestamp: + description: StartTimestamp records the time a restore was started. + The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + served: true + storage: true + subresources: {} +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: restores.velero.io + spec: + group: velero.io + names: + kind: Restore + listKind: RestoreList + plural: restores + singular: restore + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Restore is a Velero resource that represents the application + of resources from a Velero backup to a target Kubernetes cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RestoreSpec defines the specification for a Velero restore. + properties: + backupName: + description: BackupName is the unique name of the Velero backup + to restore from. + type: string + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that + are not included in the restore. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names that + are not included in the restore. + items: + type: string + nullable: true + type: array + existingResourcePolicy: + description: ExistingResourcePolicy specifies the restore behavior + for the Kubernetes resource to be restored + nullable: true + type: string + hooks: + description: Hooks represent custom behaviors that should be executed + during or after restore. + properties: + resources: + items: + description: RestoreResourceHookSpec defines one or more RestoreResourceHooks + that should be executed based on the rules defined for namespaces, + resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces + to which this hook spec applies. If empty, it applies + to all namespaces.
+ items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources + to which this hook spec applies. If empty, it applies + to all resources. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the + resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: Name is the name of this hook. + type: string + postHooks: + description: PostHooks is a list of RestoreResourceHooks + to execute during and after restoring a resource. + items: + description: RestoreResourceHook defines a restore hook + for a resource. + properties: + exec: + description: Exec defines an exec restore hook. + properties: + command: + description: Command is the command and arguments + to execute from within a container after a + pod has been restored. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the + pod where the command should be executed. + If not specified, the pod's first container + is used. + type: string + execTimeout: + description: ExecTimeout defines the maximum + amount of time Velero should wait for the + hook to complete before considering the execution + a failure. + type: string + onError: + description: OnError specifies how Velero should + behave if it encounters an error executing + this hook. + enum: + - Continue + - Fail + type: string + waitForReady: + description: WaitForReady ensures the command will + be launched when the container is Ready instead + of Running. + nullable: true + type: boolean + waitTimeout: + description: WaitTimeout defines the maximum + amount of time Velero should wait for the + container to be Ready before attempting to + run the command. + type: string + required: + - command + type: object + init: + description: Init defines an init restore hook. + properties: + initContainers: + description: InitContainers is a list of init + containers to be added to a pod during its + restore.
+ items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + x-kubernetes-preserve-unknown-fields: true + timeout: + description: Timeout defines the maximum amount + of time Velero should wait for the initContainers + to complete. + type: string + type: object + type: object + type: array + required: + - name + type: object + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped + resources should be included for consideration in the restore. + If null, defaults to true. + nullable: true + type: boolean + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to + include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names to include + in the restore. If empty, all resources in the backup are included. + items: + type: string + nullable: true + type: array + itemOperationTimeout: + description: ItemOperationTimeout specifies the time used to wait + for RestoreItemAction operations. The default value is 1 hour. + type: string + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter with + when restoring individual objects from the backup. If empty or + nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceMapping: + additionalProperties: + type: string + description: NamespaceMapping is a map of source namespace names + to target namespace names to restore into. Any source namespaces + not included in the map will be restored into namespaces of the + same name. + type: object + orLabelSelectors: + description: OrLabelSelectors is a list of metav1.LabelSelector to + filter with when restoring individual objects from the backup. + If multiple are provided, they will be joined by the OR operator. LabelSelector + as well as OrLabelSelectors cannot co-exist in a restore request; + only one of them can be used + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An + empty label selector matches all objects.
A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + preserveNodePorts: + description: PreserveNodePorts specifies whether to restore old + nodePorts from backup. + nullable: true + type: boolean + resourceModifier: + description: ResourceModifier specifies the reference to JSON resource + patches that should be applied to resources before restoration. + nullable: true + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in + the core API group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + restorePVs: + description: RestorePVs specifies whether to restore all included + PVs from snapshot + nullable: true + type: boolean + restoreStatus: + description: RestoreStatus specifies the resources for which the status + field should be restored. If nil, no objects are included. Optional. + nullable: true + properties: + excludedResources: + description: ExcludedResources specifies the resources whose + status will not be restored. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources whose + status will be restored. If empty, it applies to all resources. + items: + type: string + nullable: true + type: array + type: object + scheduleName: + description: ScheduleName is the unique name of the Velero schedule + to restore from. If specified, and BackupName is empty, Velero + will restore from the most recent successful backup created from + this schedule. + type: string + uploaderConfig: + description: UploaderConfig specifies the configuration for the + restore. + nullable: true + properties: + writeSparseFiles: + description: WriteSparseFiles is a flag to indicate whether to + write files sparsely or not.
+ nullable: true + type: boolean + type: object + required: + - backupName + type: object + status: + description: RestoreStatus captures the current status of a Velero restore + properties: + completionTimestamp: + description: CompletionTimestamp records the time the restore operation + was completed. Completion time is recorded even on failed restores. + The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + errors: + description: Errors is a count of all error messages that were generated + during execution of the restore. The actual errors are stored + in object storage. + type: integer + failureReason: + description: FailureReason is an error that caused the entire restore + to fail. + type: string + hookStatus: + description: HookStatus contains information about the status of + the hooks. + nullable: true + properties: + hooksAttempted: + description: HooksAttempted is the total number of attempted + hooks. Specifically, HooksAttempted represents the number of + hooks that failed to execute and the number of hooks that + executed successfully. + type: integer + hooksFailed: + description: HooksFailed is the total number of hooks which + ended with an error + type: integer + type: object + phase: + description: Phase is the current state of the Restore + enum: + - New + - FailedValidation + - InProgress + - WaitingForPluginOperations + - WaitingForPluginOperationsPartiallyFailed + - Completed + - PartiallyFailed + - Failed + type: string + progress: + description: Progress contains information about the restore's execution + progress. Note that this information is best-effort only -- if + Velero fails to update it during a restore for any reason, it + may be inaccurate/stale. + nullable: true + properties: + itemsRestored: + description: ItemsRestored is the number of items that have + actually been restored so far + type: integer + totalItems: + description: TotalItems is the total number of items to be restored. + This number may change throughout the execution of the restore + due to plugins that return additional related items to restore + type: integer + type: object + restoreItemOperationsAttempted: + description: RestoreItemOperationsAttempted is the total number + of attempted async RestoreItemAction operations for this restore. + type: integer + restoreItemOperationsCompleted: + description: RestoreItemOperationsCompleted is the total number + of successfully completed async RestoreItemAction operations for + this restore. + type: integer + restoreItemOperationsFailed: + description: RestoreItemOperationsFailed is the total number of + async RestoreItemAction operations for this restore which ended + with an error. + type: integer + startTimestamp: + description: StartTimestamp records the time the restore operation + was started. The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + validationErrors: + description: ValidationErrors is a slice of all validation errors + (if applicable) + items: + type: string + nullable: true + type: array + warnings: + description: Warnings is a count of all warning messages that were + generated during execution of the restore. The actual warnings + are stored in object storage.
+ type: integer + type: object + type: object + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: schedules.velero.io + spec: + group: velero.io + names: + kind: Schedule + listKind: ScheduleList + plural: schedules + singular: schedule + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of the schedule + jsonPath: .status.phase + name: Status + type: string + - description: A Cron expression defining when to run the Backup + jsonPath: .spec.schedule + name: Schedule + type: string + - description: The last time a Backup was run for this schedule + jsonPath: .status.lastBackup + name: LastBackup + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.paused + name: Paused + type: boolean + name: v1 + schema: + openAPIV3Schema: + description: Schedule is a Velero resource that represents a pre-scheduled + or periodic Backup that should be run. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ScheduleSpec defines the specification for a Velero schedule + properties: + paused: + description: Paused specifies whether the schedule is paused or + not + type: boolean + schedule: + description: Schedule is a Cron expression defining when to run + the Backup. + type: string + skipImmediately: + description: 'SkipImmediately specifies whether to skip backup if + schedule is due immediately from `schedule.status.lastBackup` + timestamp when schedule is unpaused or if schedule is new. If + true, backup will be skipped immediately when schedule is unpaused + if it is due based on .Status.LastBackupTimestamp or schedule + is new, and will run at next schedule time. If false, backup will + not be skipped immediately when schedule is unpaused, but will + run at next schedule time. If empty, will follow server configuration + (default: false).' + type: boolean + template: + description: Template is the definition of the Backup to be run + on the provided schedule + properties: + csiSnapshotTimeout: + description: CSISnapshotTimeout specifies the time used to wait + for the CSI VolumeSnapshot status to turn to ReadyToUse during creation, + before returning an error as timeout. The default value is 10 + minutes. + type: string + datamover: + description: DataMover specifies the data mover to be used by + the backup. If DataMover is "" or "velero", the built-in data + mover will be used. + type: string + defaultVolumesToFsBackup: + description: DefaultVolumesToFsBackup specifies whether pod + volume file system backup should be used for all volumes by + default.
+ nullable: true + type: boolean + defaultVolumesToRestic: + description: "DefaultVolumesToRestic specifies whether restic + should be used to take a backup of all pod volumes by default. + \n Deprecated: this field is no longer used and will be removed + entirely in future. Use DefaultVolumesToFsBackup instead." + nullable: true + type: boolean + excludedClusterScopedResources: + description: ExcludedClusterScopedResources is a slice of cluster-scoped + resource type names to exclude from the backup. If set to + "*", all cluster-scoped resource types are excluded. The default + value is empty. + items: + type: string + nullable: true + type: array + excludedNamespaceScopedResources: + description: ExcludedNamespaceScopedResources is a slice of + namespace-scoped resource type names to exclude from the backup. + If set to "*", all namespace-scoped resource types are excluded. + The default value is empty. + items: + type: string + nullable: true + type: array + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces + that are not included in the backup. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names + that are not included in the backup. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be + executed at different phases of the backup. + properties: + resources: + description: Resources are hooks that should be executed + when backing up individual instances of a resource. + items: + description: BackupResourceHookSpec defines one or more + BackupResourceHooks that should be executed based on + the rules defined for namespaces, resources, and label + selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces + to which this hook spec applies. If empty, it applies + to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources + to which this hook spec applies. If empty, it applies + to all resources. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters + the resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: Name is the name of this hook. + type: string + post: + description: PostHooks is a list of BackupResourceHooks + to execute after storing the item in the backup. + These are executed after all "additional items" + from item actions are processed. + items: + description: BackupResourceHook defines a hook for + a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and + arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container + in the pod where the command should be + executed. If not specified, the pod's + first container is used. + type: string + onError: + description: OnError specifies how Velero + should behave if it encounters an error + executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum + amount of time Velero should wait for + the hook to complete before considering + the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + pre: + description: PreHooks is a list of BackupResourceHooks + to execute prior to storing the item in the backup. + These are executed before any "additional items" + from item actions are processed. + items: + description: BackupResourceHook defines a hook for + a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and + arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container + in the pod where the command should be + executed. If not specified, the pod's + first container is used. + type: string + onError: + description: OnError specifies how Velero + should behave if it encounters an error + executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum + amount of time Velero should wait for + the hook to complete before considering + the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + required: + - name + type: object + nullable: true + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped + resources should be included for consideration in the backup. + nullable: true + type: boolean + includedClusterScopedResources: + description: IncludedClusterScopedResources is a slice of cluster-scoped + resource type names to include in the backup. If set to "*", + all cluster-scoped resource types are included. The default + value is empty, which means only related cluster-scoped resources + are included. 
+ items: + type: string + nullable: true + type: array + includedNamespaceScopedResources: + description: IncludedNamespaceScopedResources is a slice of + namespace-scoped resource type names to include in the backup. + The default value is "*". + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names + to include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names + to include in the backup. If empty, all resources are included. + items: + type: string + nullable: true + type: array + itemOperationTimeout: + description: ItemOperationTimeout specifies the time used to + wait for asynchronous BackupItemAction operations. The default + value is 1 hour. + type: string + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter + with when adding individual objects to the backup. If empty + or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + metadata: + properties: + labels: + additionalProperties: + type: string + type: object + type: object + orLabelSelectors: + description: OrLabelSelectors is a list of metav1.LabelSelector + to filter with when adding individual objects to the backup. + If multiple are provided, they will be joined by the OR operator. + LabelSelector as well as OrLabelSelectors cannot co-exist + in a backup request; only one of them can be used. + items: + description: A label selector is a label query over a set + of resources. The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. + A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to.
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + orderedResources: + additionalProperties: + type: string + description: OrderedResources specifies the backup order of + resources of specific Kind. The map key is the resource name + and value is a list of object names separated by commas. Each + resource name has format "namespace/objectname". For cluster + resources, simply use "objectname". + nullable: true + type: object + resourcePolicy: + description: ResourcePolicy specifies the referenced resource + policies that backup should follow + properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + snapshotMoveData: + description: SnapshotMoveData specifies whether snapshot data + should be moved + nullable: true + type: boolean + snapshotVolumes: + description: SnapshotVolumes specifies whether to take snapshots + of any PV's referenced in the set of objects included in the + Backup. + nullable: true + type: boolean + storageLocation: + description: StorageLocation is a string containing the name + of a BackupStorageLocation where the backup should be stored. + type: string + ttl: + description: TTL is a time.Duration-parseable string describing + how long the Backup should be retained for. + type: string + uploaderConfig: + description: UploaderConfig specifies the configuration for + the uploader. + nullable: true + properties: + parallelFilesUpload: + description: ParallelFilesUpload is the number of files + parallel uploads to perform when using the uploader. + type: integer + type: object + volumeSnapshotLocations: + description: VolumeSnapshotLocations is a list containing names + of VolumeSnapshotLocations associated with this backup. + items: + type: string + type: array + type: object + useOwnerReferencesInBackup: + description: UseOwnerReferencesBackup specifies whether to use OwnerReferences + on backups created by this Schedule. 
+                nullable: true
+                type: boolean
+            required:
+            - schedule
+            - template
+            type: object
+          status:
+            description: ScheduleStatus captures the current state of a Velero schedule
+            properties:
+              lastBackup:
+                description: LastBackup is the last time a Backup was run for this
+                  Schedule
+                format: date-time
+                nullable: true
+                type: string
+              lastSkipped:
+                description: LastSkipped is the last time a Schedule was skipped
+                format: date-time
+                nullable: true
+                type: string
+              phase:
+                description: Phase is the current phase of the Schedule
+                enum:
+                - New
+                - Enabled
+                - FailedValidation
+                type: string
+              validationErrors:
+                description: ValidationErrors is a slice of all validation errors
+                  (if applicable)
+                items:
+                  type: string
+                type: array
+            type: object
+        type: object
+      served: true
+      storage: true
+      subresources: {}
+- apiVersion: apiextensions.k8s.io/v1
+  kind: CustomResourceDefinition
+  metadata:
+    annotations:
+      controller-gen.kubebuilder.io/version: v0.12.0
+    creationTimestamp: null
+    labels:
+      component: velero
+    name: serverstatusrequests.velero.io
+  spec:
+    group: velero.io
+    names:
+      kind: ServerStatusRequest
+      listKind: ServerStatusRequestList
+      plural: serverstatusrequests
+      shortNames:
+      - ssr
+      singular: serverstatusrequest
+    scope: Namespaced
+    versions:
+    - name: v1
+      schema:
+        openAPIV3Schema:
+          description: ServerStatusRequest is a request to access current status information
+            about the Velero server.
+          properties:
+            apiVersion:
+              description: 'APIVersion defines the versioned schema of this representation
+                of an object. Servers should convert recognized schemas to the latest
+                internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+              type: string
+            kind:
+              description: 'Kind is a string value representing the REST resource
+                this object represents. Servers may infer this from the endpoint the
+                client submits requests to. Cannot be updated. In CamelCase. More
+                info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+              type: string
+            metadata:
+              type: object
+            spec:
+              description: ServerStatusRequestSpec is the specification for a ServerStatusRequest.
+              type: object
+            status:
+              description: ServerStatusRequestStatus is the current status of a ServerStatusRequest.
+              properties:
+                phase:
+                  description: Phase is the current lifecycle phase of the ServerStatusRequest.
+                  enum:
+                  - New
+                  - Processed
+                  type: string
+                plugins:
+                  description: Plugins lists information about the plugins running
+                    on the Velero server
+                  items:
+                    description: PluginInfo contains attributes of a Velero plugin
+                    properties:
+                      kind:
+                        type: string
+                      name:
+                        type: string
+                    required:
+                    - kind
+                    - name
+                    type: object
+                  nullable: true
+                  type: array
+                processedTimestamp:
+                  description: ProcessedTimestamp is when the ServerStatusRequest
+                    was processed by the ServerStatusRequestController.
+                  format: date-time
+                  nullable: true
+                  type: string
+                serverVersion:
+                  description: ServerVersion is the Velero server version.
+ type: string + type: object + type: object + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: volumesnapshotlocations.velero.io + spec: + group: velero.io + names: + kind: VolumeSnapshotLocation + listKind: VolumeSnapshotLocationList + plural: volumesnapshotlocations + shortNames: + - vsl + singular: volumesnapshotlocation + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VolumeSnapshotLocation is a location where Velero stores volume + snapshots. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VolumeSnapshotLocationSpec defines the specification for + a Velero VolumeSnapshotLocation. + properties: + config: + additionalProperties: + type: string + description: Config is for provider-specific configuration fields. + type: object + credential: + description: Credential contains the credential information intended + to be used with this location + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + provider: + description: Provider is the provider of the volume storage. + type: string + required: + - provider + type: object + status: + description: VolumeSnapshotLocationStatus describes the current status + of a Velero VolumeSnapshotLocation. + properties: + phase: + description: VolumeSnapshotLocationPhase is the lifecycle phase + of a Velero VolumeSnapshotLocation. 
+ enum: + - Available + - Unavailable + type: string + type: object + type: object + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: datadownloads.velero.io + spec: + group: velero.io + names: + kind: DataDownload + listKind: DataDownloadList + plural: datadownloads + singular: datadownload + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: DataDownload status such as New/InProgress + jsonPath: .status.phase + name: Status + type: string + - description: Time duration since this DataDownload was started + jsonPath: .status.startTimestamp + name: Started + type: date + - description: Completed bytes + format: int64 + jsonPath: .status.progress.bytesDone + name: Bytes Done + type: integer + - description: Total bytes + format: int64 + jsonPath: .status.progress.totalBytes + name: Total Bytes + type: integer + - description: Name of the Backup Storage Location where the backup data is + stored + jsonPath: .spec.backupStorageLocation + name: Storage Location + type: string + - description: Time duration since this DataDownload was created + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Name of the node where the DataDownload is processed + jsonPath: .status.node + name: Node + type: string + name: v2alpha1 + schema: + openAPIV3Schema: + description: DataDownload acts as the protocol between data mover plugins + and data mover controller for the datamover restore operation + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DataDownloadSpec is the specification for a DataDownload. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the backup storage + location where the backup repository is stored. + type: string + cancel: + description: Cancel indicates request to cancel the ongoing DataDownload. + It can be set when the DataDownload is in InProgress phase + type: boolean + dataMoverConfig: + additionalProperties: + type: string + description: DataMoverConfig is for data-mover-specific configuration + fields. + type: object + datamover: + description: DataMover specifies the data mover to be used by the + backup. If DataMover is "" or "velero", the built-in data mover + will be used. + type: string + operationTimeout: + description: OperationTimeout specifies the time used to wait internal + operations, before returning error as timeout. + type: string + snapshotID: + description: SnapshotID is the ID of the Velero backup snapshot + to be restored from. + type: string + sourceNamespace: + description: SourceNamespace is the original namespace where the + volume is backed up from. 
It may be different from SourcePVC's + namespace if namespace is remapped during restore. + type: string + targetVolume: + description: TargetVolume is the information of the target PVC and + PV. + properties: + namespace: + description: Namespace is the target namespace + type: string + pv: + description: PV is the name of the target PV that is created + by Velero restore + type: string + pvc: + description: PVC is the name of the target PVC that is created + by Velero restore + type: string + required: + - namespace + - pv + - pvc + type: object + required: + - backupStorageLocation + - operationTimeout + - snapshotID + - sourceNamespace + - targetVolume + type: object + status: + description: DataDownloadStatus is the current status of a DataDownload. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a restore was + completed. Completion time is recorded even on failed restores. + The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + message: + description: Message is a message about the DataDownload's status. + type: string + node: + description: Node is name of the node where the DataDownload is + processed. + type: string + phase: + description: Phase is the current state of the DataDownload. + enum: + - New + - Accepted + - Prepared + - InProgress + - Canceling + - Canceled + - Completed + - Failed + type: string + progress: + description: Progress holds the total number of bytes of the snapshot + and the current number of restored bytes. This can be used to + display progress information about the restore operation. + properties: + bytesDone: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + type: object + startTimestamp: + description: StartTimestamp records the time a restore was started. 
+ The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + served: true + storage: true + subresources: {} +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + labels: + component: velero + name: datauploads.velero.io + spec: + group: velero.io + names: + kind: DataUpload + listKind: DataUploadList + plural: datauploads + singular: dataupload + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: DataUpload status such as New/InProgress + jsonPath: .status.phase + name: Status + type: string + - description: Time duration since this DataUpload was started + jsonPath: .status.startTimestamp + name: Started + type: date + - description: Completed bytes + format: int64 + jsonPath: .status.progress.bytesDone + name: Bytes Done + type: integer + - description: Total bytes + format: int64 + jsonPath: .status.progress.totalBytes + name: Total Bytes + type: integer + - description: Name of the Backup Storage Location where this backup should + be stored + jsonPath: .spec.backupStorageLocation + name: Storage Location + type: string + - description: Time duration since this DataUpload was created + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Name of the node where the DataUpload is processed + jsonPath: .status.node + name: Node + type: string + name: v2alpha1 + schema: + openAPIV3Schema: + description: DataUpload acts as the protocol between data mover plugins + and data mover controller for the datamover backup operation + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DataUploadSpec is the specification for a DataUpload. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the backup storage + location where the backup repository is stored. + type: string + cancel: + description: Cancel indicates request to cancel the ongoing DataUpload. + It can be set when the DataUpload is in InProgress phase + type: boolean + csiSnapshot: + description: If SnapshotType is CSI, CSISnapshot provides the information + of the CSI snapshot. 
+ nullable: true + properties: + snapshotClass: + description: SnapshotClass is the name of the snapshot class + that the volume snapshot is created with + type: string + storageClass: + description: StorageClass is the name of the storage class of + the PVC that the volume snapshot is created from + type: string + volumeSnapshot: + description: VolumeSnapshot is the name of the volume snapshot + to be backed up + type: string + required: + - storageClass + - volumeSnapshot + type: object + dataMoverConfig: + additionalProperties: + type: string + description: DataMoverConfig is for data-mover-specific configuration + fields. + nullable: true + type: object + datamover: + description: DataMover specifies the data mover to be used by the + backup. If DataMover is "" or "velero", the built-in data mover + will be used. + type: string + operationTimeout: + description: OperationTimeout specifies the time used to wait internal + operations, before returning error as timeout. + type: string + snapshotType: + description: SnapshotType is the type of the snapshot to be backed + up. + type: string + sourceNamespace: + description: SourceNamespace is the original namespace where the + volume is backed up from. It is the same namespace for SourcePVC + and CSI namespaced objects. + type: string + sourcePVC: + description: SourcePVC is the name of the PVC which the snapshot + is taken for. + type: string + required: + - backupStorageLocation + - operationTimeout + - snapshotType + - sourceNamespace + - sourcePVC + type: object + status: + description: DataUploadStatus is the current status of a DataUpload. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a backup was completed. + Completion time is recorded even on failed backups. Completion + time is recorded before uploading the backup object. The server's + time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + dataMoverResult: + additionalProperties: + type: string + description: DataMoverResult stores data-mover-specific information + as a result of the DataUpload. + nullable: true + type: object + message: + description: Message is a message about the DataUpload's status. + type: string + node: + description: Node is name of the node where the DataUpload is processed. + type: string + path: + description: Path is the full path of the snapshot volume being + backed up. + type: string + phase: + description: Phase is the current state of the DataUpload. + enum: + - New + - Accepted + - Prepared + - InProgress + - Canceling + - Canceled + - Completed + - Failed + type: string + progress: + description: Progress holds the total number of bytes of the volume + and the current number of backed up bytes. This can be used to + display progress information about the backup operation. + properties: + bytesDone: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + type: object + snapshotID: + description: SnapshotID is the identifier for the snapshot in the + backup repository. + type: string + startTimestamp: + description: StartTimestamp records the time a backup was started. + Separate from CreationTimestamp, since that value changes on restores. 
+ The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + served: true + storage: true + subresources: {} +- apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + labels: + component: velero + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/audit-version: latest + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/enforce-version: latest + pod-security.kubernetes.io/warn: privileged + pod-security.kubernetes.io/warn-version: latest + name: velero + spec: {} +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + creationTimestamp: null + labels: + component: velero + name: velero + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: velero + namespace: velero +- apiVersion: v1 + kind: ServiceAccount + metadata: + creationTimestamp: null + labels: + component: velero + name: velero + namespace: velero +- apiVersion: v1 + data: + cloud: W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gdmVsZXJvCmF3c19zZWNyZXRfYWNjZXNzX2tleSA9IDRJc1RoZU1pbmRLaWxsZXIK + kind: Secret + metadata: + creationTimestamp: null + labels: + component: velero + name: cloud-credentials + namespace: velero + type: Opaque +- apiVersion: velero.io/v1 + kind: BackupStorageLocation + metadata: + creationTimestamp: null + labels: + component: velero + name: default + namespace: velero + spec: + config: + region: minio + s3ForcePathStyle: "true" + s3Url: http://pbs.undercloud.local:9000 + default: true + objectStorage: + bucket: velero + provider: aws +- apiVersion: velero.io/v1 + kind: VolumeSnapshotLocation + metadata: + creationTimestamp: null + labels: + component: velero + name: default + namespace: velero + spec: + provider: aws +- apiVersion: apps/v1 + kind: Deployment + metadata: + creationTimestamp: null + labels: + component: velero + name: velero + namespace: velero + spec: + selector: + matchLabels: + deploy: velero + strategy: {} + template: + metadata: + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "8085" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + component: velero + deploy: velero + spec: + containers: + - args: + - server + - --features= + - --uploader-type=kopia + command: + - /velero + env: + - name: VELERO_SCRATCH_DIR + value: /scratch + - name: VELERO_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_LIBRARY_PATH + value: /plugins + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /credentials/cloud + - name: AWS_SHARED_CREDENTIALS_FILE + value: /credentials/cloud + - name: AZURE_CREDENTIALS_FILE + value: /credentials/cloud + - name: ALIBABA_CLOUD_CREDENTIALS_FILE + value: /credentials/cloud + image: velero/velero:v1.13.2 + imagePullPolicy: IfNotPresent + name: velero + ports: + - containerPort: 8085 + name: metrics + resources: + limits: + cpu: "1" + memory: 512Mi + requests: + cpu: 500m + memory: 128Mi + volumeMounts: + - mountPath: /plugins + name: plugins + - mountPath: /scratch + name: scratch + - mountPath: /credentials + name: cloud-credentials + initContainers: + - image: velero/velero-plugin-for-aws:latest + imagePullPolicy: IfNotPresent + name: velero-velero-plugin-for-aws + resources: {} + volumeMounts: + - mountPath: /target + name: plugins + - image: velero/velero-plugin-for-csi:latest + imagePullPolicy: IfNotPresent + name: velero-velero-plugin-for-csi + resources: {} + 
+          volumeMounts:
+          - mountPath: /target
+            name: plugins
+        restartPolicy: Always
+        serviceAccountName: velero
+        volumes:
+        - emptyDir: {}
+          name: plugins
+        - emptyDir: {}
+          name: scratch
+        - name: cloud-credentials
+          secret:
+            secretName: cloud-credentials
+kind: List
+
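A few hedged usage sketches for the objects this List defines. First, the Schedule CRD is the entry point for recurring backups: spec.schedule takes a cron expression and spec.template embeds a full backup spec. A minimal conforming object might look like the sketch below; the name, namespace, and label are hypothetical, chosen only to exercise the schema:

apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: daily-apps                # hypothetical name
  namespace: velero
spec:
  schedule: "0 2 * * *"           # required: cron expression (02:00 every day)
  template:                       # required: an embedded backup spec
    includedNamespaces:
    - apps                        # hypothetical namespace
    labelSelector:                # optional; cannot be combined with orLabelSelectors
      matchLabels:
        backup: "true"            # hypothetical label
    snapshotVolumes: true
    storageLocation: default      # the BackupStorageLocation defined above
    ttl: 720h0m0s                 # time.Duration-parseable retention (30 days)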
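The per-resource hook schema shown above (hooks.resources, with pre and post exec hooks) maps onto backup manifests as in this sketch; beyond the required name, exec, and command fields everything is optional, and all concrete values are hypothetical:

apiVersion: velero.io/v1
kind: Backup
metadata:
  name: db-preflight              # hypothetical name
  namespace: velero
spec:
  includedNamespaces:
  - db                            # hypothetical namespace
  hooks:
    resources:
    - name: freeze-db             # required for every hook entry
      labelSelector:
        matchLabels:
          app: postgres           # hypothetical label
      pre:                        # runs before the matched items are stored in the backup
      - exec:
          container: postgres     # optional; defaults to the pod's first container
          command:                # required, minItems: 1
          - /bin/sh
          - -c
          - pg_dump -U postgres appdb > /tmp/dump.sql   # hypothetical command
          onError: Fail           # enum: Continue | Fail
          timeout: 60s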
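ServerStatusRequest (shortname ssr) is normally created on your behalf by the Velero CLI when it queries the server, which fills in the status and later reaps the object. Since its spec is an empty object, a handcrafted probe needs nothing beyond metadata (the name here is hypothetical):

apiVersion: velero.io/v1
kind: ServerStatusRequest
metadata:
  name: status-probe              # hypothetical name
  namespace: velero
spec: {}

Once status.phase reaches Processed, status.serverVersion and status.plugins report what the server is actually running.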
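A VolumeSnapshotLocation requires only spec.provider; config carries provider-specific keys, and credential may point at a secret key, as in this sketch (the location name is hypothetical, while the secret name and key deliberately mirror the cloud-credentials secret above):

apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
  name: aws-snapshots             # hypothetical name
  namespace: velero
spec:
  provider: aws                   # required
  credential:                     # optional; key is required once credential is set
    name: cloud-credentials
    key: cloud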
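DataUpload and DataDownload (v2alpha1) are the handshake objects of the built-in data mover: Velero creates them when a backup or restore runs with snapshot data movement enabled, and the node agent (whose DaemonSet is not part of this List) claims and processes them, so in practice they are watched rather than written by hand. Purely to illustrate the required spec fields, with every concrete value hypothetical:

apiVersion: velero.io/v2alpha1
kind: DataUpload
metadata:
  name: daily-apps-x7k2p          # normally generated from the owning backup
  namespace: velero
spec:
  backupStorageLocation: default  # required
  operationTimeout: 10m           # required
  snapshotType: CSI               # required
  sourceNamespace: apps           # required: namespace the volume is backed up from
  sourcePVC: data-apps-0          # required: PVC the snapshot was taken for
  csiSnapshot:                    # storageClass and volumeSnapshot are required inside
    storageClass: standard
    volumeSnapshot: velero-data-apps-0-x7k2p

Progress surfaces through status.progress.bytesDone and status.progress.totalBytes, which is exactly what the Bytes Done / Total Bytes printer columns above render.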
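On the namespace: the pod-security.kubernetes.io labels pre-declare the privileged Pod Security level for warn, audit, and enforce alike. The server Deployment itself does not appear to need that headroom, but a node agent DaemonSet (which host-path mounts kubelet's pod directory) would, so granting it up front is a plausible, if permissive, default.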
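The cloud-credentials secret is base64-encoded, not encrypted: its cloud key appears to decode to an INI-style AWS credentials file with a [default] profile (access key id velero plus its secret key), which is the file format AWS_SHARED_CREDENTIALS_FILE expects at /credentials/cloud. It can be checked with kubectl -n velero get secret cloud-credentials -o jsonpath='{.data.cloud}' | base64 -d. The default BackupStorageLocation then speaks S3 to MinIO at pbs.undercloud.local:9000; s3ForcePathStyle "true" is what makes bucket-in-path URLs work there, and region "minio" is a nominal value that MinIO does not enforce.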
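One sharp edge in the Deployment: the server image is pinned to velero/velero:v1.13.2, but both plugin initContainers float on :latest. Plugins install themselves by copying binaries into the shared plugins emptyDir (mounted at /target in the initContainers and /plugins in the server container), so whatever :latest resolves to at pod start is what the server loads, and a future plugin release can drift outside the window supported by a 1.13 server. A pinned variant is safer; the exact tags in this sketch are assumptions to be verified against the upstream compatibility matrix:

      initContainers:
      - name: velero-velero-plugin-for-aws
        image: velero/velero-plugin-for-aws:v1.9.2   # assumed 1.13-compatible tag; verify upstream
      - name: velero-velero-plugin-for-csi
        image: velero/velero-plugin-for-csi:v0.7.1   # assumed 1.13-compatible tag; verify upstream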