This commit is contained in:
2025-09-01 21:12:31 +02:00
parent 9a4262cad8
commit 7636deafc2
31 changed files with 74289 additions and 0 deletions

BIN
.DS_Store vendored

Binary file not shown.

BIN
argocd/.DS_Store vendored Normal file

Binary file not shown.

7
argocd/README.md Normal file
View File

@@ -0,0 +1,7 @@
This is the install manifest from the Argo CD quickstart guide.
I have added control-plane node tolerations to all deployments so Argo CD can come up during bootstrap, when only the master node exists in the k8s cluster.
Not working:
LDAP login
LDAP only for admins

16
argocd/apps.yaml Normal file
View File

@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: app-of-apps
  namespace: argocd
  # Cascade-delete all child resources when this Application is deleted.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    namespace: app-of-apps
    server: https://kubernetes.default.svc
  project: default
  source:
    path: app-of-apps
    repoURL: https://build-node.undercloud.local/Undercloud/k8s-apps.git
    targetRevision: HEAD

15
argocd/argocd-secret.yaml Normal file
View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Secret
metadata:
  name: argocd-secret
  namespace: argocd
  labels:
    app.kubernetes.io/name: argocd-secret
    app.kubernetes.io/part-of: argocd
type: Opaque
# SECURITY NOTE(review): this commits a bcrypt admin-password hash, a TLS
# private key, and the Dex LDAP bind password to version control. Move these
# to a sealed/external secret mechanism and rotate the credentials.
data:
  admin.password: JDJhJDEwJHhpRFAzcHZsNmdrNzlNUEpUZU12aHVSVHR5REppWXVZZUN3eXBIenpqcmpRRkMxV0NrUkVL
  admin.passwordMtime: MjAyMy0wMS0wOVQxNTo0MDoxMFo=
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURZakNDQWtxZ0F3SUJBZ0lRRU92UHJkOTZLTzVrbnFNQzJIK2dhakFOQmdrcWhraUc5dzBCQVFzRkFEQVMKTVJBd0RnWURWUVFLRXdkQmNtZHZJRU5FTUI0WERUSXpNREV3T1RFNE1qRTBObG9YRFRJME1ERXdPVEU0TWpFMApObG93RWpFUU1BNEdBMVVFQ2hNSFFYSm5ieUJEUkRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDCkFRb0NnZ0VCQUszeTdhSkVpRVhqcXZRdWRwWE5EYXdKM2lhT0xHUjRLMk9OMll6WWxvUTBoNFR5V0tlbXMzdkwKK1VWd2UxZGMxL1pMc1hxbk9wYkI3T2hySjhKRTQ5N2NlTk4xc25vTHdpejlYRHJxTnA2MGZJYkc1Vnh2QW9jagpJU3NBVTBkamJhc0pmMlpTRkpSYktKYXZvTWhjbFcyanpsU0U1djhvY1JReGZ5ekVQQ01DZDFCQVdHZE9RUUZmCmxxTzJ5SmVDRHZ4clgvWlR2ZXFUL0pjTzdwYWNWTnNYc2Q3QTlrUnF0b1l6bWxMQVpkNHQ2NFZoYTJXazZzZDgKRzZNakdjN043S05xZ09nb21tczNHYzR3b0NWVGlhRnltcDNlTXFpZEtUSTBGSmZtM1Q4cmphY2l2L0RyV0ZldApyUHM4MEZwL3FWaGNaQXpTSzVvcFVVYWJVK0ZMd1lFQ0F3RUFBYU9Cc3pDQnNEQU9CZ05WSFE4QkFmOEVCQU1DCkJhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0V3REFZRFZSMFRBUUgvQkFJd0FEQjdCZ05WSFJFRWREQnkKZ2dsc2IyTmhiR2h2YzNTQ0RXRnlaMjlqWkMxelpYSjJaWEtDRkdGeVoyOWpaQzF6WlhKMlpYSXVZWEpuYjJOawpnaGhoY21kdlkyUXRjMlZ5ZG1WeUxtRnlaMjlqWkM1emRtT0NKbUZ5WjI5alpDMXpaWEoyWlhJdVlYSm5iMk5rCkxuTjJZeTVqYkhWemRHVnlMbXh2WTJGc01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQkticzQva0dhSnNSdnUKT3VRSzRMSFh0bk5oZmdPYXV2VVVnUUU1TmFGYXBkVVJMME1kMDVCQkVkanZjNWp6QVlRd1UzNmFjQSs3L1l0NQpDTFVhQUc0dDZhZ0hrT2tRZ28zbEgxZmxoekFVOXRSanMvU3NyWEJMdVhWVnBibWtNQ0h5VmNCRUx2Q2wyU1RECm5aRkJWUi9ldEozOTBtMVhEOVpqcFh4cUc3V1hjeWxsd1BmVm1oWktLQ3FlRTFZUTVnQnRsZDFuQm9PZDI2NmIKb2ZzbFdVRkdHbHNjUXBMRGthb3p6bUUwWkUveUVBZ3RqRUhwVWlxZWtBMzY2Zm1wWC8rMmJ5ZU1mcENybjZwcApVSUVSVE92a09RWnlmTHRweFpvVis2NmdJYmpWZzhjanlsSVZFWUJURTB6QVZDZVNORlpabml4YXdZaFIvL1VwCjhGclVVRlIyCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  # NOTE(review): the tls.key value was line-wrapped in the paste; a Secret
  # data value must be a single scalar, so the two fragments are rejoined here.
  tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBcmZMdG9rU0lSZU9xOUM1MmxjME5yQW5lSm80c1pIZ3JZNDNaak5pV2hEU0hoUEpZCnA2YXplOHY1UlhCN1Yxelg5a3V4ZXFjNmxzSHM2R3Nud2tUajN0eDQwM1d5ZWd2Q0xQMWNPdW8ybnJSOGhzYmwKWEc4Q2h5TWhLd0JUUjJOdHF3bC9abElVbEZzb2xxK2d5RnlWYmFQT1ZJVG0veWh4RkRGL0xNUThJd0ozVUVCWQpaMDVCQVYrV283YklsNElPL0d0ZjlsTzk2cFA4bHc3dWxweFUyeGV4M3NEMlJHcTJoak9hVXNCbDNpM3JoV0ZyClphVHF4M3dib3lNWnpzM3NvMnFBNkNpYWF6Y1p6akNnSlZPSm9YS2FuZDR5cUowcE1qUVVsK2JkUHl1TnB5Sy8KOE90WVY2MnMrenpRV24rcFdGeGtETklybWlsUlJwdFQ0VXZCZ1FJREFRQUJBb0lCQUFsRURucDlVaEQrL3FWQgpNRm5zY1ZUL3RaQ3lOQlVwL1kyeENheWlBT3FMN2NnY0xMTEpnM2dOdG5xSGNscUYvSThIR3k0Z2FGT05ndlFpCmVSeTFGTHBUWGpCTmJiTm9tdkJRa3JBa3Q4SWVkUitzeXB6dzRONlg5WFZYOEJlMmFTb2FPR29YNXphNzRGNmUKZmw4TjUzdk9wUGdGbUZSeE1BeldVaUx0VlJLOFFXbEpSbC9YUGc3YVphRS9pTjh0QlJQWHNybS8welNaNTBDSgpYYmtMaEpHcDZVcXJENVkydHIrVk5FWWlnaGtMS21UaWplcmYySFN0c1lHR2hTZUJocTJTS1RMa0tsSHFlZDJiCnRDUXRTZi9wc0UyODlMeVNsZXo4U0d3dzU2RXpkTVlGNzA4cThQd2FVQ3NKMmx3YUF5OGJqYUNHYU9tMFBCYXgKSlZxaUFrMENnWUVBMFcyZHVGUmRQN0dmK2pyQnNxNFJINGxnQ1hBNVhTd3U2SU9GZnphM3ZvUU5LYWdxWEIwRwpCOFhhdmpscWRUcEdtK29rQTYxcVFjbHozRU0vRGlVbi9LZENxamVZaXlsREt0YUJ4MmMzVjV1cXkvSzNoS3NJCnczbndOVDdxdFJ1MnFxYmE2amZLVEZUQjFMYXN6KzRNK1hieUpkS1Bucnk3QWRrclNnd2Y3QWNDZ1lFQTFLR0sKT3hhUUVZTEErQWJtdEM2c2lRUDJzTEJOTXl4MUUwK015ZzR2SHN2bGJmM056Vkd1K0ZxMmxudUsza1JWV1czWgorM04yTGxmL3VrTVIwKzNMNk4xZVJIYUg1NEdMMGFEMnFBOHU0Y2gvajFvc2xhM1lSQVRPempvRDNxY1VoL1l1CnFHV1dFcHVtRmpVRHcwUy9senQ4T280bC9nSEszcVl0MFJja2xEY0NnWUFFUmNFMjlubWtpQUlrZjdoZDVkRXgKbklMYURuRU5KbzZhYmJ4MmVPNU1zN0wxQzVvNW9ObnpwS1N1eHNzV015Y25uU0k0OVB1Njlkb29QekwxSGVydgo2NVdmbFZ6R1VnUXR4b3lGUGVUU0k3bmJTVXRYS0lvWU9zK2N0bmpoVXZ4cUJOZG9lZFRsczhEMGp2bDlrSFN4ClRpem9lcUExYzlJNDJtNVlwMkNyVndLQmdFTnNPYlhFME1nTXF2MjZ5bnJGOWdXNFVRZnp6M2J6bU9nWHFIQUYKa1dGMWkvbVZxaVY1eWo2TUhTdVlYdzlkb2FhcTA1ME5IcU5SV2hDSTVlVkttUWJzOHRCSXVZMXFJWmpHTHBCWgp5Q2JsK2JUT0JpY3NLZEJmcWVmd3MzdHRoWEFiV3U0ZzBEWjBUblRxT0pnNWUzc0w3TGR3alpGK3BnSjF0VCtBCjFDTi9Bb0dBWGY2R1FqN2RRcnlVUFZ2ZW9pR2RFWFQycGx1RmJkZUdRNmNzd0FPc0V0Y2VvczR5cjFXWExrR1oKbm5lMzlOZFJtVURKYUlUb3RCcnl1SGIyRFdJeEpOMlZ3dkZ5RmxxdTBFM1p3cTIzL2hzdFF1VWpXdGJYZ0dtdwpiTlc0SnhXclBISGZBWFdWUGRUcGNpK2VTSTBCVnhpMFhNQkc5dDViNkY1Z1ViOStOdlE9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
  dex.ldap.bindPW: YXJnb2Nkc2VjdXJlUFc=

26
argocd/ingress.yaml Normal file
View File

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd
  namespace: argocd
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    # Argo CD terminates TLS itself on 8080/443; proxy upstream over HTTPS.
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  # Replaces the deprecated kubernetes.io/ingress.class annotation
  # (removed behavior since networking.k8s.io/v1).
  ingressClassName: nginx
  tls:
    - hosts:
        - argocd.apps.undercloud.dev
      secretName: argocd-tls
  rules:
    - host: argocd.apps.undercloud.dev
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: argocd-server
                port:
                  number: 443

26798
argocd/install.yaml Normal file

File diff suppressed because it is too large Load Diff

6
argocd/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  name: argocd
  labels:
    # Opt this namespace in for Prometheus discovery/monitoring.
    prometheus: prometheus

27
argocd/repo.yaml Normal file
View File

@@ -0,0 +1,27 @@
apiVersion: v1
kind: Secret
metadata:
  name: build-node-undercloud-infrastructure
  namespace: argocd
  labels:
    # Marks this Secret as an Argo CD repository credential.
    argocd.argoproj.io/secret-type: repository
type: Opaque
stringData:
  # URL of the Git repository.
  url: https://build-node.undercloud.local/Undercloud/undercloud-infrastructure.git
  # Set to "true" for plain HTTP or an untrusted certificate.
  insecure: "true"
---
apiVersion: v1
kind: Secret
metadata:
  name: build-node-k8s-apps
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
type: Opaque
stringData:
  # URL of the Git repository.
  url: https://build-node.undercloud.local/Undercloud/k8s-apps.git
  # Set to "true" for plain HTTP or an untrusted certificate.
  insecure: "true"

View File

@@ -0,0 +1,56 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: argocd-metrics
  namespace: argocd
  labels:
    team: undercloud
spec:
  # namespaceSelector:
  #   matchNames:
  #     - argocd-metrics
  selector:
    matchLabels:
      app.kubernetes.io/name: argocd-metrics
  endpoints:
    - port: metrics
      # path: /metrics
      interval: 5s
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: argocd-notifications-controller-metrics
  namespace: argocd
  labels:
    team: undercloud
spec:
  # namespaceSelector:
  #   matchNames:
  #     - argocd-metrics
  selector:
    matchLabels:
      app.kubernetes.io/name: argocd-notifications-controller-metrics
  endpoints:
    - port: metrics
      # path: /metrics
      interval: 5s
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: argocd-server-metrics
  namespace: argocd
  labels:
    team: undercloud
spec:
  # namespaceSelector:
  #   matchNames:
  #     - argocd-metrics
  selector:
    matchLabels:
      app.kubernetes.io/name: argocd-server-metrics
  endpoints:
    - port: metrics
      # path: /metrics
      interval: 5s

24
argocd/service.yaml Normal file
View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
  name: argocd-server
  namespace: argocd
spec:
  type: ClusterIP
  # Static dual-stack VIPs (IPv6 primary); quoted so the colon-separated
  # scalars cannot be mis-typed by the parser.
  clusterIP: "2001:470:7116:f:1::81"
  clusterIPs:
    - "2001:470:7116:f:1::81"
    - "10.0.91.81"
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: RequireDualStack
  ports:
    - name: http
      port: 80
      targetPort: 8080
    - name: https
      port: 443
      targetPort: 8080
  selector:
    app.kubernetes.io/name: argocd-server

View File

@@ -0,0 +1,127 @@
---
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  # logSeverityScreen: Info
  # Full node-to-node mesh disabled: nodes peer with the core switch below.
  nodeToNodeMeshEnabled: false
  asNumber: 65000
  # Service CIDRs advertised over BGP.
  serviceClusterIPs:
    - cidr: "2001:470:7116:f:1::/108"  # server service net
    - cidr: "2001:470:7116:f:2::/108"  # dmz service net
    # - cidr: "fd00:0:0:f:1::/108"  # server service net
    # - cidr: "fd00:0:0:f:2::/108"  # dmz service net
    # - cidr: "fd00:0:0:a::/108"  # server service net
    - cidr: "10.0.91.0/24"  # server service net
    - cidr: "10.0.92.0/24"  # dmz service net
---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "bgp-router1-v6"
# spec:
#   peerIP: "fd00:0:0:2::88"  # aux1 - bgp router...
#   asNumber: 65000
#   nextHopMode: "Self"
#   sourceAddress: "UseNodeIP"
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "bgp-router1-v4"
# spec:
#   peerIP: "10.0.2.88"  # aux2 - bgp router...
#   asNumber: 65000
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "bgp-router2-v6"
# spec:
#   peerIP: "fd00:0:0:2::89"  # aux1 - bgp router...
#   asNumber: 65000
#   nextHopMode: "Self"
#   sourceAddress: "UseNodeIP"
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "bgp-router2-v4"
# spec:
#   peerIP: "10.0.2.89"  # aux2 - bgp router...
#   asNumber: 65000
# ---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: "coreswitch-v6"
spec:
  peerIP: "fd00:0:0:2::3"  # core switch, IPv6 peer
  asNumber: 65000
  nextHopMode: "Self"
  sourceAddress: "UseNodeIP"
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: "coreswitch-v4"
spec:
  peerIP: "10.0.2.3"  # core switch, IPv4 peer
  asNumber: 65000
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "firewall-v4"
# spec:
#   peerIP: "10.0.2.1"  # aux2 - bgp router...
#   asNumber: 65000
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "coreswitch-v6"
# spec:
#   peerIP: "fd00:0:0:2::3"  # aux1 - bgp router...
#   asNumber: 65000
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "coreswitch-v4"
# spec:
#   peerIP: "10.0.2.3"  # aux2 - bgp router...
#   asNumber: 65000
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "aux1-v6"
# spec:
#   peerIP: "fd00:0:0:2::6"  # aux1 - bgp router...
#   asNumber: 65000
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "aux2-v6"
# spec:
#   peerIP: "fd00:0:0:2::7"  # aux2 - bgp router...
#   asNumber: 65000
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "aux1-v4"
# spec:
#   peerIP: "10.0.2.6"  # aux1 - bgp router...
#   asNumber: 65000
# ---
# apiVersion: projectcalico.org/v3
# kind: BGPPeer
# metadata:
#   name: "aux2-v4"
# spec:
#   peerIP: "10.0.2.7"  # aux2 - bgp router...
#   asNumber: 65000

51
calico-config/calico.yaml Normal file
View File

@@ -0,0 +1,51 @@
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  typhaMetricsPort: 9093
  calicoNetwork:
    nodeAddressAutodetectionV4:
      interface: "eth.*"
    nodeAddressAutodetectionV6:
      cidrs:
        - "2001:470:7116:2::/64"
    ipPools:
      - blockSize: 122
        cidr: "2001:470:7116:a::/64"
        encapsulation: "VXLAN"
        natOutgoing: Enabled
        nodeSelector: all()
      - blockSize: 26
        cidr: "10.0.10.0/24"
        encapsulation: IPIP
        natOutgoing: Enabled
        nodeSelector: all()
  flexVolumePath: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
---
apiVersion: v1
kind: Service
metadata:
  name: typha-metrics-svc
  namespace: calico-system
spec:
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: PreferDualStack
  # ipFamilyPolicy: SingleStack
  type: ClusterIP
  selector:
    k8s-app: calico-typha
  ports:
    - port: 9093
      targetPort: 9093
      name: metrics-port
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

9337
calico-config/crds.yaml Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,47 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    ipPools:
      - blockSize: 122
        cidr: "2001:470:7116:a::/64"
        encapsulation: VXLAN
        natOutgoing: Disabled
        nodeSelector: all()
      - blockSize: 26
        cidr: "10.0.10.0/24"
        encapsulation: IPIP
        natOutgoing: Enabled
        nodeSelector: all()
  flexVolumePath: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
---
# Configures the Calico Goldmane flow aggregator.
apiVersion: operator.tigera.io/v1
kind: Goldmane
metadata:
  name: default
---
# Configures the Calico Whisker observability UI.
apiVersion: operator.tigera.io/v1
kind: Whisker
metadata:
  name: default

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,38 @@
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ipv6-server-private  # server net
spec:
  blockSize: 122
  cidr: "fd00:0:0:a::/64"
  ipipMode: Never
  # natOutgoing: false
  # disabled: false
  nodeSelector: all()
  vxlanMode: Always
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ipv6-dmz-private  # dmz net
spec:
  blockSize: 122
  cidr: "fd00:0:0:b::/64"
  ipipMode: Never
  # natOutgoing: false
  # disabled: false
  nodeSelector: all()
  vxlanMode: Always
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ipv6-dmz-public  # dmz net
spec:
  blockSize: 122
  cidr: "2001:470:7116:b::/64"
  ipipMode: Never
  # natOutgoing: false
  # disabled: false
  nodeSelector: all()
  vxlanMode: Always

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Namespace
metadata:
  name: calico-system
  labels:
    app.kubernetes.io/instance: calico
    # Opt in for Prometheus discovery/monitoring.
    prometheus: prometheus
---
apiVersion: v1
kind: Namespace
metadata:
  name: calico-apiserver
  labels:
    app.kubernetes.io/instance: calico
    prometheus: prometheus

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,590 @@
apiVersion: v1
kind: Namespace
metadata:
  name: tigera-operator
  labels:
    name: tigera-operator
    # Operator pod runs hostNetwork with host paths, so it needs the
    # privileged Pod Security Standard.
    pod-security.kubernetes.io/enforce: privileged
---
# Source: tigera-operator/templates/tigera-operator/02-serviceaccount-tigera-operator.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tigera-operator
  namespace: tigera-operator
  labels:
    k8s-app: tigera-operator
imagePullSecrets: []
---
# Source: tigera-operator/templates/tigera-operator/02-role-tigera-operator-secrets.yaml
# Permissions required to manipulate operator secrets for a Calico cluster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: tigera-operator-secrets
  labels:
    k8s-app: tigera-operator
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - create
      - update
      - delete
---
# Source: tigera-operator/templates/tigera-operator/02-role-tigera-operator.yaml
# Permissions required when running the operator for a Calico cluster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: tigera-operator
labels:
k8s-app: tigera-operator
rules:
# The tigera/operator installs CustomResourceDefinitions necessary for itself
# and Calico more broadly to function.
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- create
# We only allow update access to our own CRDs.
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- apiservers.operator.tigera.io
- gatewayapis.operator.tigera.io
- imagesets.operator.tigera.io
- installations.operator.tigera.io
- tigerastatuses.operator.tigera.io
- bgpconfigurations.crd.projectcalico.org
- bgpfilters.crd.projectcalico.org
- bgppeers.crd.projectcalico.org
- blockaffinities.crd.projectcalico.org
- caliconodestatuses.crd.projectcalico.org
- clusterinformations.crd.projectcalico.org
- felixconfigurations.crd.projectcalico.org
- globalnetworkpolicies.crd.projectcalico.org
- stagedglobalnetworkpolicies.crd.projectcalico.org
- globalnetworksets.crd.projectcalico.org
- hostendpoints.crd.projectcalico.org
- ipamblocks.crd.projectcalico.org
- ipamconfigs.crd.projectcalico.org
- ipamhandles.crd.projectcalico.org
- ippools.crd.projectcalico.org
- ipreservations.crd.projectcalico.org
- kubecontrollersconfigurations.crd.projectcalico.org
- networkpolicies.crd.projectcalico.org
- stagednetworkpolicies.crd.projectcalico.org
- stagedkubernetesnetworkpolicies.crd.projectcalico.org
- networksets.crd.projectcalico.org
- tiers.crd.projectcalico.org
- whiskers.operator.tigera.io
- goldmanes.operator.tigera.io
- managementclusterconnections.operator.tigera.io
# We need update and delete access for ANP/BANP CRDs to set owner refs when assuming control of pre-existing CRDs, for example on OCP.
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
- delete
resourceNames:
- adminnetworkpolicies.policy.networking.k8s.io
- baselineadminnetworkpolicies.policy.networking.k8s.io
- apiGroups:
- ""
resources:
- namespaces
- pods
- podtemplates
- services
- endpoints
- events
- configmaps
- serviceaccounts
verbs:
- create
- get
- list
- update
- delete
- watch
- apiGroups:
- ""
resources:
- resourcequotas
- secrets
verbs:
- list
- get
- watch
- apiGroups:
- ""
resources:
- resourcequotas
verbs:
- create
- get
- list
- update
- delete
- watch
resourceNames:
- calico-critical-pods
- tigera-critical-pods
- apiGroups:
- ""
resources:
- nodes
verbs:
# Need to update node labels when migrating nodes.
- get
- patch
- list
# We need this for Typha autoscaling
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
- clusterrolebindings
- rolebindings
- roles
verbs:
- create
- get
- list
- update
- delete
- watch
- bind
- escalate
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- create
- get
- list
- patch
- update
- delete
- watch
- apiGroups:
- apps
resourceNames:
- tigera-operator
resources:
- deployments/finalizers
verbs:
- update
# The operator needs read and update permissions on the APIs that it controls.
- apiGroups:
- operator.tigera.io
resources:
# Note: any resources used by the operator within an OwnerReference for resources
# it creates requires permissions to <resource>/finalizers.
- apiservers
- apiservers/finalizers
- apiservers/status
- gatewayapis
- gatewayapis/finalizers
- gatewayapis/status
- goldmanes
- goldmanes/finalizers
- goldmanes/status
- imagesets
- installations
- installations/finalizers
- installations/status
- managementclusterconnections
- managementclusterconnections/finalizers
- managementclusterconnections/status
- tigerastatuses
- tigerastatuses/status
- tigerastatuses/finalizers
- whiskers
- whiskers/finalizers
- whiskers/status
verbs:
- get
- list
- update
- patch
- watch
# In addition to the above, the operator creates and deletes TigeraStatus resources.
- apiGroups:
- operator.tigera.io
resources:
- tigerastatuses
verbs:
- create
- delete
# In addition to the above, the operator should have the ability to delete their own resources during uninstallation.
- apiGroups:
- operator.tigera.io
resources:
- installations
- apiservers
- whiskers
- goldmanes
verbs:
- delete
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- create
- update
- delete
- get
- list
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- felixconfigurations
- ippools
verbs:
- create
- patch
- list
- get
- watch
- apiGroups:
- crd.projectcalico.org
resources:
- kubecontrollersconfigurations
- bgpconfigurations
- clusterinformations
verbs:
- get
- list
- watch
- apiGroups:
- projectcalico.org
resources:
- ippools
verbs:
- create
- update
- delete
- patch
- get
- list
- watch
- apiGroups:
- projectcalico.org
resources:
- ipamconfigurations
verbs:
- get
- list
- watch
- apiGroups:
- scheduling.k8s.io
resources:
- priorityclasses
verbs:
- create
- get
- list
- update
- delete
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- get
- list
- update
- delete
- watch
- apiGroups:
- apiregistration.k8s.io
resources:
- apiservices
verbs:
- list
- watch
- create
- update
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- delete
# Needed for operator lock
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- update
- delete
- watch
- apiGroups:
- storage.k8s.io
resources:
- csidrivers
verbs:
- list
- watch
- update
- get
- create
- delete
# Add the permissions to monitor the status of certificate signing requests when certificate management is enabled.
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- list
- watch
# Add the appropriate pod security policy permissions
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- tigera-operator
verbs:
- use
- apiGroups:
- policy
resources:
- podsecuritypolicies
verbs:
- get
- list
- watch
- create
- update
- delete
# For tiered network policy actions, tigera-apiserver requires that we authorize the operator for the tier.networkpolicies and tier.globalnetworkpolicies pseudo-kinds.
- apiGroups:
- projectcalico.org
resourceNames:
- allow-tigera.*
resources:
- tier.networkpolicies
- tier.globalnetworkpolicies
verbs:
- list
- watch
- get
- create
- update
- delete
# For tiered network policy actions, tigera-apiserver requires get authorization on the associated tier.
- apiGroups:
- projectcalico.org
resourceNames:
- allow-tigera
resources:
- tiers
verbs:
- get
- delete
- update
# Separated from the above rule since resourceNames does not support the create verb, and requires a field selector for list/watch verbs.
- apiGroups:
- projectcalico.org
resources:
- tiers
verbs:
- create
- list
- watch
# Additions for Gateway API support.
# 1. The operator needs to reconcile gateway.networking.k8s.io and gateway.envoyproxy.io CRDs.
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- backendlbpolicies.gateway.networking.k8s.io
- backendtlspolicies.gateway.networking.k8s.io
- gatewayclasses.gateway.networking.k8s.io
- gateways.gateway.networking.k8s.io
- grpcroutes.gateway.networking.k8s.io
- httproutes.gateway.networking.k8s.io
- referencegrants.gateway.networking.k8s.io
- tcproutes.gateway.networking.k8s.io
- tlsroutes.gateway.networking.k8s.io
- udproutes.gateway.networking.k8s.io
- backends.gateway.envoyproxy.io
- backendtrafficpolicies.gateway.envoyproxy.io
- clienttrafficpolicies.gateway.envoyproxy.io
- envoyextensionpolicies.gateway.envoyproxy.io
- envoypatchpolicies.gateway.envoyproxy.io
- envoyproxies.gateway.envoyproxy.io
- httproutefilters.gateway.envoyproxy.io
- securitypolicies.gateway.envoyproxy.io
# 2. GatewayClasses and EnvoyProxy configurations.
- apiGroups:
- gateway.networking.k8s.io
resources:
- gatewayclasses
verbs:
- create
- update
- delete
- list
- get
- watch
- apiGroups:
- gateway.envoyproxy.io
resources:
- envoyproxies
verbs:
- create
- update
- delete
- list
- get
- watch
# 3. For Gateway API the operator needs to be able to create and reconcile a certificate
# generation job.
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- update
resourceNames:
- tigera-gateway-api-gateway-helm-certgen
---
# Source: tigera-operator/templates/tigera-operator/02-rolebinding-tigera-operator.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tigera-operator
  labels:
    k8s-app: tigera-operator
subjects:
  - kind: ServiceAccount
    name: tigera-operator
    namespace: tigera-operator
roleRef:
  kind: ClusterRole
  name: tigera-operator
  apiGroup: rbac.authorization.k8s.io
---
# Source: tigera-operator/templates/tigera-operator/02-rolebinding-tigera-operator-secrets.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tigera-operator-secrets
  namespace: tigera-operator
  labels:
    k8s-app: tigera-operator
subjects:
  - kind: ServiceAccount
    name: tigera-operator
    namespace: tigera-operator
roleRef:
  kind: ClusterRole
  name: tigera-operator-secrets
  apiGroup: rbac.authorization.k8s.io
---
# Source: tigera-operator/templates/tigera-operator/02-tigera-operator.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tigera-operator
  namespace: tigera-operator
  labels:
    k8s-app: tigera-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      name: tigera-operator
  template:
    metadata:
      labels:
        name: tigera-operator
        k8s-app: tigera-operator
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      # Tolerate all NoExecute/NoSchedule taints so the operator can run
      # during bootstrap on a tainted control-plane-only cluster.
      tolerations:
        - effect: NoExecute
          operator: Exists
        - effect: NoSchedule
          operator: Exists
      serviceAccountName: tigera-operator
      # Set the termination grace period to match how long the operator will
      # wait for resources to terminate when being uninstalled.
      terminationGracePeriodSeconds: 60
      hostNetwork: true
      # This must be set when hostNetwork is true or else the cluster
      # services won't resolve.
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: tigera-operator
          image: quay.io/tigera/operator:v1.38.3
          imagePullPolicy: IfNotPresent
          command:
            - operator
          args:
            # Configure tigera-operator to manage installation of the
            # necessary CRDs.
            - -manage-crds=true
          volumeMounts:
            - name: var-lib-calico
              readOnly: true
              mountPath: /var/lib/calico
          env:
            - name: WATCH_NAMESPACE
              value: ""
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: "tigera-operator"
            - name: TIGERA_OPERATOR_INIT_IMAGE_VERSION
              value: v1.38.3
          envFrom:
            - configMapRef:
                name: kubernetes-services-endpoint
                optional: true
      volumes:
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico

View File

@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
  name: whisker
  namespace: calico-system
spec:
  type: ClusterIP
  # Static dual-stack VIPs (IPv6 primary); quoted to keep the
  # colon-separated scalars as strings.
  clusterIP: "2001:470:7116:f:1::82"
  clusterIPs:
    - "2001:470:7116:f:1::82"
    - "10.0.91.82"
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: RequireDualStack
  ports:
    - name: http
      port: 8081
      targetPort: 8081
  selector:
    app.kubernetes.io/name: whisker

BIN
gitea/.DS_Store vendored Normal file

Binary file not shown.

10
gitea/README.md Normal file
View File

@@ -0,0 +1,10 @@
# Undercloud Gitea
## Git
Main repository for Kubernetes apps.
The root URL has been changed; check for errors or problems.
Improvements:
LDAP group import (no CLI command available...)
LDAP avatars

47
gitea/adminer.yaml Normal file
View File

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: adminer
  namespace: gitea
  labels:
    app: adminer
spec:
  replicas: 1
  selector:
    matchLabels:
      app: adminer
  template:
    metadata:
      labels:
        app: adminer
    spec:
      containers:
        - name: adminer
          # NOTE(review): image has no tag (implicitly :latest) — consider
          # pinning a version for reproducible deploys.
          image: adminer
          imagePullPolicy: IfNotPresent
          env:
            # Pre-fill the Adminer login form with the gitea db Service.
            - name: ADMINER_DEFAULT_SERVER
              value: db
          ports:
            - containerPort: 8080
              protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: adminer
  namespace: gitea
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
    - IPv6
  ipFamilyPolicy: SingleStack
  ports:
    - name: http
      port: 8080
      protocol: TCP
      targetPort: 8080
  selector:
    app: adminer
  sessionAffinity: None
  type: ClusterIP

216
gitea/db.yaml Normal file
View File

@@ -0,0 +1,216 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: db
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: gitea
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: gitea-replicated
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: db
  namespace: gitea
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 16Gi
  storageClassName: db
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: db
  namespace: gitea
  labels:
    app: db
spec:
  replicas: 1
  selector:
    matchLabels:
      app: db
  template:
    metadata:
      labels:
        app: db
    spec:
      containers:
        - name: db
          image: mariadb:10.5
          imagePullPolicy: "IfNotPresent"
          ports:
            - name: mysql
              containerPort: 3306
          env:
            - name: MARIADB_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: gitea-db
                  key: root.pw
            - name: MARIADB_USER
              valueFrom:
                secretKeyRef:
                  name: gitea-db
                  key: username
                  optional: false
            - name: MARIADB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: gitea-db
                  key: user.pw
                  optional: false
            - name: MARIADB_DATABASE
              value: gitea
          # livenessProbe:
          #   exec:
          #     command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
          #   initialDelaySeconds: 120
          #   periodSeconds: 10
          #   timeoutSeconds: 1
          #   successThreshold: 1
          #   failureThreshold: 3
          # readinessProbe:
          #   exec:
          #     command: ["sh", "-c", "exec mysqladmin status -uroot -p$MARIADB_ROOT_PASSWORD"]
          #   initialDelaySeconds: 30
          #   periodSeconds: 10
          #   timeoutSeconds: 1
          #   successThreshold: 1
          #   failureThreshold: 3
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: db
            readOnly: false
---
apiVersion: v1
kind: Service
metadata:
  name: db
  namespace: gitea
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
    - IPv6
  ipFamilyPolicy: SingleStack
  ports:
    - name: mysql
      port: 3306
      protocol: TCP
      targetPort: 3306
  selector:
    app: db
  sessionAffinity: None
  type: ClusterIP
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: db-backup
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: gitea
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: gitea-replicated
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: db-backup
  namespace: gitea
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: db-backup
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: db-backup
  namespace: gitea
  labels:
    app: db-backup
spec:
  replicas: 1
  selector:
    matchLabels:
      app: db-backup
  template:
    metadata:
      labels:
        app: db-backup
    spec:
      containers:
        - name: db-backup
          image: rsprta/mariadb-backup
          imagePullPolicy: "IfNotPresent"
          env:
            # Daily dump of the gitea database into the backup PVC.
            - name: CRON_TIMER
              value: "@daily"
            - name: MARIADB_HOST
              value: db
            - name: MARIADB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: gitea-db
                  key: root.pw
            - name: MARIADB_USER
              value: root
            - name: MARIADB_PORT
              value: "3306"
          volumeMounts:
            - mountPath: /backup
              name: backup
      volumes:
        - name: backup
          persistentVolumeClaim:
            claimName: db-backup
            readOnly: false

42
gitea/filesystem.yaml Normal file
View File

@@ -0,0 +1,42 @@
# Rook CephFS filesystem backing both StorageClasses in this repo
# (gitea and db-backup); 3-way replicated metadata and data, spread across
# hosts.
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: gitea
  namespace: rook-ceph
spec:
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  dataPools:
    # Rook names the pool "<fsName>-<name>" = "gitea-replicated", which is
    # what the StorageClasses reference via "pool: gitea-replicated".
    - name: replicated
      failureDomain: host
      replicated:
        size: 3
  # NOTE(review): false means the filesystem — and all data on it — is
  # removed when this resource is deleted; confirm that is intended.
  preserveFilesystemOnDelete: false
  metadataServer:
    activeCount: 1
    # One active MDS plus a hot standby.
    activeStandby: true
    placement:
    #  nodeAffinity:
    #    requiredDuringSchedulingIgnoredDuringExecution:
    #      nodeSelectorTerms:
    #      - matchExpressions:
    #        - key: role
    #          operator: In
    #          values:
    #          - mds-node
      # Allow MDS pods onto tainted dedicated storage nodes.
      tolerations:
      - key: node-role.kubernetes.io/storage-node
        operator: Exists
        effect: NoSchedule
    #  podAffinity:
    #  podAntiAffinity:
    #  topologySpreadConstraints:
    #resources:
    #  limits:
    #    cpu: "80m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"

357
gitea/gitea.yaml Normal file
View File

@@ -0,0 +1,357 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-ini
  namespace: gitea
data:
  # file-like keys
  #
  # Copied to /data/gitea/conf/app.ini by the "copyappini" initContainer of
  # the gitea Deployment. The [database] section below (sqlite3/localhost) is
  # overridden at runtime by the GITEA__database__* env vars of that
  # Deployment, which switch Gitea to the MariaDB "db" Service.
  #
  # SECURITY(review): LFS_JWT_SECRET and INTERNAL_TOKEN are live secrets
  # committed in a ConfigMap; they belong in a Secret and should be rotated
  # now that they are in VCS. SECRET_KEY is left empty here — verify Gitea
  # persists the generated one under /data so sessions survive restarts.
  app.ini: |
    APP_NAME = Gitea: Undercloud Code Repository
    RUN_MODE = prod
    RUN_USER = git
    [repository]
    ROOT = /data/git/repositories
    ENABLE_PUSH_CREATE_USER=true
    ENABLE_PUSH_CREATE_ORG=true
    ONLY_ALLOW_PUSH_IF_GITEA_ENVIRONMENT_SET=false
    [repository.local]
    LOCAL_COPY_PATH = /data/gitea/tmp/local-repo
    [repository.upload]
    TEMP_PATH = /data/gitea/uploads
    [server]
    APP_DATA_PATH = /data/gitea
    DOMAIN = localhost
    SSH_DOMAIN = localhost
    HTTP_PORT = 3000
    ROOT_URL = https://gitea.undercloud.cf/
    DISABLE_SSH = false
    SSH_PORT = 22
    SSH_LISTEN_PORT = 22
    LFS_START_SERVER = true
    LFS_JWT_SECRET = LvgbTqg7kmthqjp39gQcTr1nhNgi13A7CNAPOmZHeAc
    OFFLINE_MODE = false
    [database]
    PATH = /data/gitea/gitea.db
    DB_TYPE = sqlite3
    HOST = localhost:3306
    NAME = gitea
    USER = root
    PASSWD =
    LOG_SQL = false
    SCHEMA =
    SSL_MODE = disable
    CHARSET = utf8
    [indexer]
    ISSUE_INDEXER_PATH = /data/gitea/indexers/issues.bleve
    [session]
    PROVIDER_CONFIG = /data/gitea/sessions
    PROVIDER = file
    [picture]
    AVATAR_UPLOAD_PATH = /data/gitea/avatars
    REPOSITORY_AVATAR_UPLOAD_PATH = /data/gitea/repo-avatars
    ENABLE_FEDERATED_AVATAR = false
    [attachment]
    PATH = /data/gitea/attachments
    [log]
    MODE = console
    LEVEL = info
    ROUTER = console
    ROOT_PATH = /data/gitea/log
    [security]
    INSTALL_LOCK = true
    SECRET_KEY =
    REVERSE_PROXY_LIMIT = 1
    REVERSE_PROXY_TRUSTED_PROXIES = *
    INTERNAL_TOKEN = eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE2NzI2MTA0MzB9.MnlX0rQXUl9QQTc2Hy878Tp2SqKRCDwcl9Y6rX2d4t0
    PASSWORD_HASH_ALGO = pbkdf2
    [service]
    DISABLE_REGISTRATION = false
    REQUIRE_SIGNIN_VIEW = false
    REGISTER_EMAIL_CONFIRM = false
    ENABLE_NOTIFY_MAIL = false
    ALLOW_ONLY_EXTERNAL_REGISTRATION = false
    ENABLE_CAPTCHA = false
    DEFAULT_KEEP_EMAIL_PRIVATE = false
    DEFAULT_ALLOW_CREATE_ORGANIZATION = true
    DEFAULT_ENABLE_TIMETRACKING = true
    NO_REPLY_ADDRESS = noreply.localhost
    [lfs]
    PATH = /data/git/lfs
    [mailer]
    ENABLED = false
    [openid]
    ENABLE_OPENID_SIGNIN = true
    ENABLE_OPENID_SIGNUP = true
    [repository.pull-request]
    DEFAULT_MERGE_STYLE = merge
    [repository.signing]
    DEFAULT_TRUST_MODEL = committer
    [metrics]
    ENABLED=true
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: startup
  namespace: gitea
data:
  # One-shot bootstrap script, mounted executable at /bin/startup.sh by the
  # gitea Deployment. Guarded by the /data/startup.ran marker file so it only
  # runs on the very first start.
  startup.sh: |
    #!/bin/sh
    echo "startup..."
    if test ! -f "/data/startup.ran"; then
      echo "waiting 60s for startup..."
      sleep 60s
      echo "writing pw to files"
      # NOTE(review): passwords are persisted in cleartext on the shared
      # /data volume and appear on command lines / in git URLs below —
      # consider deleting these files once bootstrap has finished.
      echo $SHODAN_PW > /data/shodan.pw
      echo $ARGOCD_PW > /data/argocd.pw
      echo $GITEA_PW > /data/gitea.pw
      echo "creating users..."
      # FIX: removed two debug echoes that leaked ARGOCD_PW into the
      # container log.
      su git -c 'SHODAN_PW=`cat /data/shodan.pw` && gitea admin user create --username shodan --admin --password $SHODAN_PW --email thrawn235@gmail.com'
      su git -c 'ARGOCD_PW=`cat /data/argocd.pw` && gitea admin user create --username argocd --password $ARGOCD_PW --email argocd@undercloud.cf --must-change-password=false'
      su git -c 'GITEA_PW=`cat /data/gitea.pw` && gitea admin auth add-ldap --name ldap --security-protocol StartTLS --host ldap.undercloud.cf. --port 389 --user-search-base "ou=users,dc=undercloud,dc=cf" --user-filter "(&(objectClass=person)(uid=%s))" --admin-filter "(&(memberOf=cn=gitea-admins,ou=groups,dc=undercloud,dc=cf))" --email-attribute mail --avatar-attribute jpegPhoto --synchronize-users --skip-tls-verify --username-attribute uid --bind-dn "cn=gitea,ou=serviceaccounts,ou=users,dc=undercloud,dc=cf" --bind-password $GITEA_PW --attributes-in-bind --firstname-attribute cn --surname-attribute sn'
      sleep 30s
      # Fetch the tea/ctea CLI helpers from the auxiliary binaries repo.
      echo "wget tea..."
      wget http://aux-balancer.undercloud.cf.:3000/undercloud/kube-binaries/raw/branch/main/tea
      echo "wget ctea..."
      wget http://aux-balancer.undercloud.cf.:3000/undercloud/kube-binaries/raw/branch/main/ctea
      chmod +x tea
      chmod +x ctea
      #echo "using tea to create login..."
      #./tea login add --url http://localhost:3000 -i --user shodan --password $SHODAN_PW
      #./tea login default localhost:3000
      echo "creating undercloud organisation"
      sleep 30s
      #./tea organization create undercloud
      ./ctea --username shodan --password $SHODAN_PW --url http://localhost:3000 CreateOrg undercloud
      sleep 5s
      echo "creating undercloud team"
      ./ctea --username shodan --password $SHODAN_PW --url http://localhost:3000 CreateTeam undercloud undercloud
      sleep 5s
      echo "add argocd to undercloud team"
      ./ctea --username shodan --password $SHODAN_PW --url http://localhost:3000 AddUserToTeam undercloud undercloud argocd
      sleep 5s
      # Re-seed the k8aux-apps repo from the aux mirror into this instance.
      echo "cloning k8aux-apps"
      execline-cd /data git clone http://aux-balancer.undercloud.cf.:3000/undercloud/k8aux-apps.git
      execline-cd /data/k8aux-apps rm -Rf .git
      execline-cd /data/k8aux-apps git init
      execline-cd /data/k8aux-apps git config --global user.email "thrawn235@gmail.com"
      execline-cd /data/k8aux-apps git config --global user.name "shodan"
      execline-cd /data/k8aux-apps git add .
      execline-cd /data/k8aux-apps git commit -m "upload"
      echo "push k8aux-apps to localhost"
      execline-cd /data/k8aux-apps git push http://shodan:$SHODAN_PW@localhost:3000/undercloud/k8aux-apps.git --all
      echo "delete local copy..."
      #execline-cd /data rm -Rf k8aux-apps
      echo "create PushMirror.."
      ./ctea --username shodan --password $SHODAN_PW --url http://localhost:3000 AddPushMirror undercloud k8aux-apps "http://aux1.undercloud.cf.:3000/undercloud/k8aux-apps.git" shodan $SHODAN_PW 1h0m0s
      ./ctea --username shodan --password $SHODAN_PW --url http://localhost:3000 AddPushMirror undercloud k8aux-apps "http://aux2.undercloud.cf.:3000/undercloud/k8aux-apps.git" shodan $SHODAN_PW 1h0m0s
      echo "create startup.ran file..."
      touch /data/startup.ran
    else
      echo "startup ran already!"
    fi
    echo "startup done."
    #exit 123
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gitea
# Same CephFS backend as the db-backup class; only the name differs.
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: gitea
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: gitea-replicated
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
# NOTE(review): Delete means the primary Gitea data (repos, LFS, config) is
# destroyed together with its PVC — confirm Retain wouldn't be safer here.
reclaimPolicy: Delete
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea
  namespace: gitea
spec:
  # ReadWriteMany via CephFS (see the gitea StorageClass above); mounted at
  # /data by the gitea Deployment.
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 32Gi
  storageClassName: gitea
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitea
  namespace: gitea
  labels:
    app: gitea
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitea
  template:
    metadata:
      labels:
        app: gitea
    spec:
      initContainers:
      # Seeds /data/gitea/conf/app.ini from the app-ini ConfigMap before the
      # main container starts (cp -f so a redeploy refreshes the config).
      - name: copyappini
        image: gitea/gitea:1.19
        command: ["bash", "-c", "mkdir -p /data/gitea/conf && cp -f /copy/app.ini /data/gitea/conf/app.ini"]
        volumeMounts:
        - mountPath: /data
          name: data
        - mountPath: /copy
          name: app-ini
      containers:
      - name: gitea
        image: gitea/gitea:1.19
        imagePullPolicy: IfNotPresent
        ports:
        # FIX: the livenessProbe below references the *named* port "http";
        # these ports were previously unnamed, so the probe could never
        # resolve its target. Naming them fixes the probe without touching
        # the Service (which targets the numeric ports).
        - name: http
          containerPort: 3000
        - name: ssh
          containerPort: 22
        #lifecycle:
        #  postStart:
        #    exec:
        #      command:
        #        - "/bin/startup.sh"
        env:
        - name: USER_UID
          value: "1000"
        - name: USER_GID
          value: "1000"
        # GITEA__section__KEY env vars override app.ini: these switch the
        # [database] section from its sqlite3 defaults to the MariaDB "db"
        # Service.
        - name: GITEA__database__DB_TYPE
          value: mysql
        - name: GITEA__database__HOST
          value: db:3306
        - name: GITEA__database__NAME
          value: gitea
        - name: GITEA__database__USER
          valueFrom:
            secretKeyRef:
              name: gitea-db
              key: username
              optional: false
        - name: GITEA__database__PASSWD
          valueFrom:
            secretKeyRef:
              name: gitea-db
              key: user.pw
              optional: false
        # Passwords consumed by startup.sh (mounted below) to create the
        # initial admin/service users and the LDAP bind.
        - name: SHODAN_PW
          valueFrom:
            secretKeyRef:
              name: shodan
              key: pw
              optional: false
        - name: ARGOCD_PW
          valueFrom:
            secretKeyRef:
              name: argocd-user
              key: pw
              optional: false
        - name: GITEA_PW
          valueFrom:
            secretKeyRef:
              name: gitea-user
              key: pw
              optional: false
        livenessProbe:
          httpGet:
            path: /api/healthz
            port: http
          # Generous initial delay: first boot runs DB migrations and the
          # one-shot bootstrap script.
          initialDelaySeconds: 200
          timeoutSeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 10
        volumeMounts:
        - mountPath: /data
          name: data
        - mountPath: /bin/startup.sh
          name: startup
          subPath: startup.sh
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: gitea
          readOnly: false
      - name: app-ini
        configMap:
          name: app-ini
          items:
          - key: "app.ini"
            path: "app.ini"
      - name: startup
        configMap:
          name: startup
          # Executable bit needed: the script is invoked as /bin/startup.sh.
          defaultMode: 0700
          items:
          - key: "startup.sh"
            path: "startup.sh"
---
apiVersion: v1
kind: Service
metadata:
  name: gitea
  namespace: gitea
  labels:
    app: gitea
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv6
  - IPv4
  ipFamilyPolicy: PreferDualStack
  ports:
  # Port name "http" is what the gitea-metrics ServiceMonitor endpoint
  # scrapes; the Ingress routes to port 3000.
  - name: http
    port: 3000
    protocol: TCP
    targetPort: 3000
  - name: ssh
    port: 22
    protocol: TCP
    targetPort: 22
  selector:
    app: gitea
  sessionAffinity: None
  type: ClusterIP

25
gitea/ingress.yaml Normal file
View File

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: gitea
  namespace: gitea
  annotations:
    # cert-manager issues the gitea-tls certificate for the host below.
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  # FIX: replaces the deprecated "kubernetes.io/ingress.class" annotation
  # with the spec field introduced in networking.k8s.io/v1 (the annotation is
  # ignored by ingress-nginx >= v1.0).
  ingressClassName: nginx
  tls:
  - hosts:
    - gitea.undercloud.cf
    secretName: gitea-tls
  rules:
  - host: gitea.undercloud.cf
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: gitea
            port:
              number: 3000

6
gitea/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  name: gitea
  labels:
    # Opt-in label — presumably matched by a Prometheus namespace selector so
    # ServiceMonitors in this namespace are discovered; confirm against the
    # Prometheus CR.
    prometheus: prometheus

37
gitea/secrets.yaml Normal file
View File

@@ -0,0 +1,37 @@
# SECURITY(review): the values below are only base64-encoded, not encrypted,
# and are committed to VCS — anyone with repo access can decode them.
# Consider SealedSecrets / SOPS / external-secrets, and rotate these values.
apiVersion: v1
kind: Secret
metadata:
  name: gitea-db
  namespace: gitea
type: Opaque
data:
  # MariaDB root password (used by the db-backup Deployment) and the
  # unprivileged gitea DB user credentials (used by the gitea Deployment).
  root.pw: dGhpc2lzYXB3
  username: Z2l0ZWE=
  user.pw: YW5kYW5vdGVyb25l
---
apiVersion: v1
kind: Secret
metadata:
  name: shodan
  namespace: gitea
type: Opaque
data:
  # Initial Gitea admin password, consumed by startup.sh.
  pw: NElzVGhlTWluZEtpbGxlcg==
---
apiVersion: v1
kind: Secret
metadata:
  name: argocd-user
  namespace: gitea
type: Opaque
data:
  # Password for the "argocd" Gitea user created by startup.sh.
  pw: dW5zZWN1cmVwdw==
---
apiVersion: v1
kind: Secret
metadata:
  name: gitea-user
  namespace: gitea
type: Opaque
data:
  # LDAP bind password for the "gitea" service account (see startup.sh).
  pw: Z2l0ZWFzZWN1cmVQVw==

View File

@@ -0,0 +1,19 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: gitea-metrics
  namespace: gitea
  labels:
    team: undercloud
spec:
  #namespaceSelector:
  #  matchNames:
  #  - argocd-metrics
  # Matches the gitea Service's labels; scrapes its "http" port, where Gitea
  # exposes metrics ([metrics] ENABLED=true in app.ini).
  selector:
    matchLabels:
      app: gitea
  endpoints:
  - port: http
    #path: /metrics
    # NOTE(review): 5s is an aggressive scrape interval — 30s is typical;
    # confirm this resolution is really needed.
    interval: 5s