This commit is contained in:
2025-08-01 17:32:15 +02:00
parent 6dd4e122bc
commit 708cf20493
4 changed files with 242 additions and 46 deletions

View File

@@ -64,7 +64,7 @@ storage:
contents:
inline: |
*******************************************************************
* AUTHORIZED ACCESS ONLY *
* AUTHORIZED ACCESS ONLY *
* *
* This system is part of a secured infrastructure. *
* All activities are monitored and logged. *
@@ -72,21 +72,41 @@ storage:
* may result in disciplinary and legal action. *
*******************************************************************
--------------------------------------------------------------------------------
kubernetes control plane Node
Manage via:
kubectl (kubectl)
calico (calicoctl)
velero - backup (velero)
argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/
--------------------------------------------------------------------------------
- path: /etc/sysctl.d/99-k8s.conf
mode: 0644
contents:
inline: |
net.ipv4.ip_forward=1
net.ipv6.conf.all.forwarding=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.conf.all.rp_filter=0
net.ipv6.conf.all.disable_ipv6=0
vm.overcommit_memory=1
fs.inotify.max_user_watches=524288
fs.inotify.max_user_instances=512
kernel.panic=10
kernel.panic_on_oops=1
net.ipv4.ip_forward = 1
net.ipv6.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
net.ipv4.conf.all.forwarding = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.netfilter.nf_conntrack_max = 1000000
net.ipv4.conf.all.rp_filter = 0
net.ipv6.conf.all.disable_ipv6 = 0
vm.overcommit_memory = 1
fs.inotify.max_user_watches = 524288
fs.inotify.max_user_instances = 512
kernel.panic = 10
kernel.panic_on_oops = 1
- path: /etc/flatcar/update.conf
overwrite: true
mode: 0420
contents:
inline: |
REBOOT_STRATEGY=off
- path: /opt/bin/kubeadm
mode: 0755
@@ -187,7 +207,24 @@ systemd:
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
WantedBy=multi-user.target
- name: set-timezone.service
enabled: true
contents: |
[Unit]
Description=Set Timezone
After=network-online.target
Wants=network-online.target
[Service]
StandardOutput=journal+console
StandardError=journal+console
Type=oneshot
Restart=on-failure
ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin
ExecStart=/usr/bin/timedatectl set-ntp true
[Install]
WantedBy=kubeadm.service
- name: kubelet.service
enabled: true
@@ -220,13 +257,20 @@ systemd:
[Service]
Type=oneshot
Environment="PATH=/opt/bin:/usr/bin:/bin:/usr/sbin:/sbin"
# Environment
Environment=KUBECONFIG=/etc/kubernetes/admin.conf
Environment=DATASTORE_TYPE=kubernetes
Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin:/opt/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent-uds/
ExecStartPre=/bin/sleep 30s
ExecStart=/opt/bin/kubeadm init --upload-certs --config=/etc/kubernetes/kubeadm-init.yaml
# copy files for kubectl
ExecStartPost=/usr/bin/mkdir -p /home/core/.kube
ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config
ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config
#ExecStartPost=/opt/bin/kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/calico.yaml
ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
Restart=on-failure
RestartSec=120s

View File

@@ -58,6 +58,56 @@ storage:
10.0.2.102 worker2.undercloud.local worker2
10.0.2.103 worker3.undercloud.local worker3
- path: /etc/motd
mode: 0644
overwrite: true
contents:
inline: |
*******************************************************************
* AUTHORIZED ACCESS ONLY *
* *
* This system is part of a secured infrastructure. *
* All activities are monitored and logged. *
* Unauthorized access or misuse is strictly prohibited and *
* may result in disciplinary and legal action. *
*******************************************************************
--------------------------------------------------------------------------------
kubernetes control plane Node
Manage via:
kubectl (kubectl)
calico (calicoctl)
velero - backup (velero)
argocd https://argocd-server.argocd.svc.k8aux.undercloud.cf/
--------------------------------------------------------------------------------
- path: /etc/sysctl.d/99-k8s.conf
mode: 0644
contents:
inline: |
net.ipv4.ip_forward = 1
net.ipv6.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
net.ipv4.conf.all.forwarding = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.netfilter.nf_conntrack_max = 1000000
net.ipv4.conf.all.rp_filter = 0
net.ipv6.conf.all.disable_ipv6 = 0
vm.overcommit_memory = 1
fs.inotify.max_user_watches = 524288
fs.inotify.max_user_instances = 512
kernel.panic = 10
kernel.panic_on_oops = 1
- path: /etc/flatcar/update.conf
overwrite: true
mode: 0420
contents:
inline: |
REBOOT_STRATEGY=off
- path: /opt/bin/kubeadm
mode: 0755
contents:
@@ -94,14 +144,79 @@ storage:
criSocket: unix:///run/containerd/containerd.sock
kubeletExtraArgs:
node-ip: "fd00:0:0:2::92"
volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
discovery:
bootstrapToken:
apiServerEndpoint: "[fd00:0:0:2::100]:6443"
token: "abcdef.0123456789abcdef"
unsafeSkipCAVerification: true
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: "::"
healthzBindAddress: "::"
clusterDomain: "k8aux.undercloud.local"
clusterDNS:
- "2001:470:72f0:f:1::a"
cgroupDriver: "systemd"
volumePluginDir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
systemd:
units:
- name: modules-load.service
enabled: true
contents: |
[Unit]
Description=Load necessary kernel modules
Before=containerd.service kubeadm-init.service
[Service]
Type=oneshot
ExecStart=/usr/bin/modprobe br_netfilter
ExecStart=/usr/bin/modprobe overlay
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
- name: systemd-networkd-wait-online.service
enabled: true
- name: containerd.service
enabled: true
contents: |
[Unit]
Description=containerd container runtime
After=network.target modules-load.service
[Service]
ExecStart=/usr/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
- name: set-timezone.service
enabled: true
contents: |
[Unit]
Description=Set Timezone
After=network-online.target
Wants=network-online.target
[Service]
StandardOutput=journal+console
StandardError=journal+console
Type=oneshot
Restart=on-failure
ExecStart=/usr/bin/timedatectl set-timezone Europe/Berlin
ExecStart=/usr/bin/timedatectl set-ntp true
[Install]
WantedBy=kubeadm.service
- name: kubelet.service
enabled: true
contents: |

View File

@@ -1,3 +1,7 @@
# ============ Deployment of the K8s cluster ==============
# = by Sebastian Gurlin for Undercloud =
# =========================================================
terraform {
required_providers {
proxmox = {
@@ -22,6 +26,10 @@ provider "proxmox" {
}
}
# ============ Download Flatcar QCOW ==============
resource "proxmox_virtual_environment_download_file" "flatcar_image" {
content_type = "import"
datastore_id = "cephfs" # oder dein ISO-Storage
@@ -31,7 +39,10 @@ resource "proxmox_virtual_environment_download_file" "flatcar_image" {
file_name = "flatcar_production_proxmoxve_image.qcow2" # wird als ISO gespeichert
}
# --- Butane zu Ignition ---
# ============== Butane zu Ignition ===============
data "ct_config" "control_plane1_ignition" {
content = file("${path.module}/control-plane1.bu")
strict = false
@@ -118,14 +129,19 @@ resource "proxmox_virtual_environment_file" "worker3_ignition" {
}
}
# --- flatcar template anlegen ---
# ============== flatcar template anlegen ================
resource "proxmox_virtual_environment_vm" "flatcar_template" {
name = "flatcar-template"
node_name = "hyper1"
template = true
started = false
name = "flatcar-template"
node_name = "hyper1"
template = true
started = false
stop_on_destroy = true
description = "managed by terraform - base template for flatcar"
description = "managed by terraform - base template for flatcar"
tags = ["flatcar","kubernetes","terraform"]
cpu {
cores = 1
@@ -161,15 +177,19 @@ resource "proxmox_virtual_environment_vm" "flatcar_template" {
lifecycle {
ignore_changes = [boot_order]
}
tags = ["flatcar","kubernetes","terraform"]
}
# --- control planes anlegen ---
# =============== deploy Control Plane ====================
resource "proxmox_virtual_environment_vm" "control_plane1" {
name = "control-plane1"
node_name = "hyper1"
description = "kubernetes control-plane1"
tags = ["control-plane","flatcar","kubernetes","terraform"]
depends_on = [proxmox_virtual_environment_file.control_plane1_ignition]
# Hardware
cpu {
@@ -197,8 +217,6 @@ resource "proxmox_virtual_environment_vm" "control_plane1" {
initialization {
user_data_file_id = "${proxmox_virtual_environment_file.control_plane1_ignition.id}"
}
tags = ["control-plane","flatcar","kubernetes","terraform"]
depends_on = [proxmox_virtual_environment_file.control_plane1_ignition]
}
resource "null_resource" "wait_for_cp1" {
depends_on = [proxmox_virtual_environment_vm.control_plane1]
@@ -212,6 +230,11 @@ resource "proxmox_virtual_environment_vm" "control_plane2" {
name = "control-plane2"
node_name = "hyper2"
description = "kubernetes control-plane2"
tags = ["control-plane","flatcar","kubernetes","terraform"]
depends_on = [
proxmox_virtual_environment_file.control_plane2_ignition,
null_resource.wait_for_cp1
]
# Hardware
cpu {
@@ -239,16 +262,16 @@ resource "proxmox_virtual_environment_vm" "control_plane2" {
initialization {
user_data_file_id = "${proxmox_virtual_environment_file.control_plane2_ignition.id}"
}
tags = ["control-plane","flatcar","kubernetes","terraform"]
depends_on = [
proxmox_virtual_environment_file.control_plane2_ignition,
null_resource.wait_for_cp1
]
}
resource "proxmox_virtual_environment_vm" "control_plane3" {
name = "control-plane3"
node_name = "hyper3"
description = "kubernetes control-plane3"
tags = ["control-plane","flatcar","kubernetes","terraform"]
depends_on = [
proxmox_virtual_environment_file.control_plane3_ignition,
null_resource.wait_for_cp1
]
# Hardware
cpu {
@@ -276,16 +299,29 @@ resource "proxmox_virtual_environment_vm" "control_plane3" {
initialization {
user_data_file_id = "${proxmox_virtual_environment_file.control_plane3_ignition.id}"
}
tags = ["control-plane","flatcar","kubernetes","terraform"]
depends_on = [
proxmox_virtual_environment_file.control_plane3_ignition,
null_resource.wait_for_cp1
]
}
resource "null_resource" "wait_for_cp3" {
depends_on = [proxmox_virtual_environment_vm.control_plane3]
provisioner "local-exec" {
command = "sleep 120" # Warte 2 Minuten
}
}
# =============== deploy Workers ====================
resource "proxmox_virtual_environment_vm" "worker1" {
name = "worker1"
node_name = "hyper1"
description = "kubernetes worker1"
tags = ["worker","flatcar","kubernetes","terraform"]
depends_on = [
proxmox_virtual_environment_file.worker1_ignition,
null_resource.wait_for_cp3
]
# Hardware
cpu {
@@ -313,16 +349,16 @@ resource "proxmox_virtual_environment_vm" "worker1" {
initialization {
user_data_file_id = "${proxmox_virtual_environment_file.worker1_ignition.id}"
}
tags = ["worker","flatcar","kubernetes","terraform"]
depends_on = [
proxmox_virtual_environment_file.worker1_ignition,
proxmox_virtual_environment_vm.control_plane1
]
}
resource "proxmox_virtual_environment_vm" "worker2" {
name = "worker2"
node_name = "hyper2"
description = "kubernetes worker2"
tags = ["worker","flatcar","kubernetes","terraform"]
depends_on = [
proxmox_virtual_environment_file.worker2_ignition,
null_resource.wait_for_cp3
]
# Hardware
cpu {
@@ -350,13 +386,16 @@ resource "proxmox_virtual_environment_vm" "worker2" {
initialization {
user_data_file_id = "${proxmox_virtual_environment_file.worker2_ignition.id}"
}
tags = ["worker","flatcar","kubernetes","terraform"]
depends_on = [proxmox_virtual_environment_file.worker2_ignition]
}
resource "proxmox_virtual_environment_vm" "worker3" {
name = "worker3"
node_name = "hyper3"
description = "kubernetes worker3"
tags = ["worker","flatcar","kubernetes","terraform"]
depends_on = [
proxmox_virtual_environment_file.worker3_ignition,
null_resource.wait_for_cp3
]
# Hardware
cpu {
@@ -384,6 +423,4 @@ resource "proxmox_virtual_environment_vm" "worker3" {
initialization {
user_data_file_id = "${proxmox_virtual_environment_file.worker3_ignition.id}"
}
tags = ["worker","flatcar","kubernetes","terraform"]
depends_on = [proxmox_virtual_environment_file.worker3_ignition]
}