calico
This commit is contained in:
48
calico-config/calico-peer.yaml
Normal file
48
calico-config/calico-peer.yaml
Normal file
@@ -0,0 +1,48 @@
|
||||
---
# Calico BGP configuration: disables the full node-to-node mesh and peers
# every node with the two external route reflectors (aux1/aux2) declared
# below, advertising the service cluster IP ranges over BGP.
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  # logSeverityScreen: Info
  nodeToNodeMeshEnabled: false
  asNumber: 65000
  serviceClusterIPs:
    - cidr: "2001:470:72f0:f:1::/108"  # server service net
    - cidr: "fd00:0:0:f:1::/108"       # server service net
    - cidr: "2001:470:72f0:f:2::/108"  # dmz service net
    - cidr: "fd00:0:0:f:2::/108"       # dmz service net
    - cidr: "10.0.91.0/24"             # server service net
    - cidr: "10.0.92.0/24"             # dmz service net
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: "aux1-v6"
spec:
  peerIP: "fd00:0:0:2::6"  # aux1 - bgp router
  asNumber: 65000
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: "aux2-v6"
spec:
  peerIP: "fd00:0:0:2::7"  # aux2 - bgp router
  asNumber: 65000
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: "aux1-v4"
spec:
  peerIP: "10.0.2.6"  # aux1 - bgp router
  asNumber: 65000
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: "aux2-v4"
spec:
  peerIP: "10.0.2.7"  # aux2 - bgp router
  asNumber: 65000
|
||||
50
calico-config/calico.yaml
Normal file
50
calico-config/calico.yaml
Normal file
@@ -0,0 +1,50 @@
|
||||
---
# Operator-driven Calico installation: dual-stack (IPv6 VXLAN pool + IPv4
# IPIP pool), node addresses auto-detected on en.* interfaces, Typha
# metrics exposed on 9093.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  typhaMetricsPort: 9093
  calicoNetwork:
    nodeAddressAutodetectionV4:
      interface: en.*
    nodeAddressAutodetectionV6:
      interface: en.*
    ipPools:
      - blockSize: 122
        cidr: 2001:470:72f0:a::/64
        encapsulation: VXLAN
        natOutgoing: Disabled
        nodeSelector: all()
      - blockSize: 26
        cidr: 10.0.10.0/24
        encapsulation: IPIP
        natOutgoing: Enabled
        nodeSelector: all()
  flexVolumePath: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/"
---
# Exposes the Typha Prometheus metrics port as a cluster service.
apiVersion: v1
kind: Service
metadata:
  name: typha-metrics-svc
  namespace: calico-system
spec:
  ipFamilies:
    - IPv6
    - IPv4
  ipFamilyPolicy: PreferDualStack
  # ipFamilyPolicy: SingleStack
  type: ClusterIP
  selector:
    k8s-app: calico-typha
  ports:
    - port: 9093
      targetPort: 9093
      name: metrics-port
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
|
||||
9337
calico-config/crds.yaml
Normal file
9337
calico-config/crds.yaml
Normal file
File diff suppressed because it is too large
Load Diff
42
calico-config/custom-resources.yaml
Normal file
42
calico-config/custom-resources.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
---
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    ipPools:
      - name: default-ipv4-ippool
        blockSize: 26
        cidr: 192.168.0.0/16
        encapsulation: VXLANCrossSubnet
        natOutgoing: Enabled
        nodeSelector: all()
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
---
# Configures the Calico Goldmane flow aggregator.
apiVersion: operator.tigera.io/v1
kind: Goldmane
metadata:
  name: default
---
# Configures the Calico Whisker observability UI.
apiVersion: operator.tigera.io/v1
kind: Whisker
metadata:
  name: default
|
||||
1188
calico-config/grafana-dashboards.yaml
Normal file
1188
calico-config/grafana-dashboards.yaml
Normal file
File diff suppressed because it is too large
Load Diff
38
calico-config/ippools.yaml
Normal file
38
calico-config/ippools.yaml
Normal file
@@ -0,0 +1,38 @@
|
||||
---
# Additional Calico IP pools: private server net, private dmz net, and a
# public (GUA) dmz net. All are VXLAN-only (ipipMode: Never).
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ipv6-server-private  # server net
spec:
  blockSize: 122
  cidr: fd00:0:0:a::/64
  ipipMode: Never
  # natOutgoing: false
  # disabled: false
  nodeSelector: all()
  vxlanMode: Always
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ipv6-dmz-private  # dmz net
spec:
  blockSize: 122
  cidr: fd00:0:0:b::/64
  ipipMode: Never
  # natOutgoing: false
  # disabled: false
  nodeSelector: all()
  vxlanMode: Always
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ipv6-dmz-public  # dmz net
spec:
  blockSize: 122
  cidr: 2001:470:72f0:b::/64
  ipipMode: Never
  # natOutgoing: false
  # disabled: false
  nodeSelector: all()
  vxlanMode: Always
|
||||
15
calico-config/namespace.yaml
Normal file
15
calico-config/namespace.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
---
# Pre-creates the Calico namespaces so they can carry the Prometheus
# discovery label before the operator populates them.
apiVersion: v1
kind: Namespace
metadata:
  name: calico-system
  labels:
    app.kubernetes.io/instance: calico
    prometheus: prometheus
---
apiVersion: v1
kind: Namespace
metadata:
  name: calico-apiserver
  labels:
    app.kubernetes.io/instance: calico
    prometheus: prometheus
|
||||
35141
calico-config/operator-crds.yaml
Normal file
35141
calico-config/operator-crds.yaml
Normal file
File diff suppressed because it is too large
Load Diff
590
calico-config/tigera-operator.yaml
Normal file
590
calico-config/tigera-operator.yaml
Normal file
@@ -0,0 +1,590 @@
|
||||
---
# Namespace for the Tigera operator; privileged pod-security enforcement is
# required for the operator's host-networked pod.
apiVersion: v1
kind: Namespace
metadata:
  name: tigera-operator
  labels:
    name: tigera-operator
    pod-security.kubernetes.io/enforce: privileged
---
# Source: tigera-operator/templates/tigera-operator/02-serviceaccount-tigera-operator.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tigera-operator
  namespace: tigera-operator
  labels:
    k8s-app: tigera-operator
imagePullSecrets: []
|
||||
---
# Source: tigera-operator/templates/tigera-operator/02-role-tigera-operator-secrets.yaml
# Permissions required to manipulate operator secrets for a Calico cluster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: tigera-operator-secrets
  labels:
    k8s-app: tigera-operator
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - create
      - update
      - delete
|
||||
---
# Source: tigera-operator/templates/tigera-operator/02-role-tigera-operator.yaml
# Permissions required when running the operator for a Calico cluster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: tigera-operator
  labels:
    k8s-app: tigera-operator
rules:
  # The tigera/operator installs CustomResourceDefinitions necessary for itself
  # and Calico more broadly to function.
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - get
      - list
      - watch
      - create
  # We only allow update access to our own CRDs.
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - update
    resourceNames:
      - apiservers.operator.tigera.io
      - gatewayapis.operator.tigera.io
      - imagesets.operator.tigera.io
      - installations.operator.tigera.io
      - tigerastatuses.operator.tigera.io
      - bgpconfigurations.crd.projectcalico.org
      - bgpfilters.crd.projectcalico.org
      - bgppeers.crd.projectcalico.org
      - blockaffinities.crd.projectcalico.org
      - caliconodestatuses.crd.projectcalico.org
      - clusterinformations.crd.projectcalico.org
      - felixconfigurations.crd.projectcalico.org
      - globalnetworkpolicies.crd.projectcalico.org
      - stagedglobalnetworkpolicies.crd.projectcalico.org
      - globalnetworksets.crd.projectcalico.org
      - hostendpoints.crd.projectcalico.org
      - ipamblocks.crd.projectcalico.org
      - ipamconfigs.crd.projectcalico.org
      - ipamhandles.crd.projectcalico.org
      - ippools.crd.projectcalico.org
      - ipreservations.crd.projectcalico.org
      - kubecontrollersconfigurations.crd.projectcalico.org
      - networkpolicies.crd.projectcalico.org
      - stagednetworkpolicies.crd.projectcalico.org
      - stagedkubernetesnetworkpolicies.crd.projectcalico.org
      - networksets.crd.projectcalico.org
      - tiers.crd.projectcalico.org
      - whiskers.operator.tigera.io
      - goldmanes.operator.tigera.io
      - managementclusterconnections.operator.tigera.io
  # We need update and delete access for ANP/BANP CRDs to set owner refs when assuming control of pre-existing CRDs, for example on OCP.
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - update
      - delete
    resourceNames:
      - adminnetworkpolicies.policy.networking.k8s.io
      - baselineadminnetworkpolicies.policy.networking.k8s.io
  - apiGroups:
      - ""
    resources:
      - namespaces
      - pods
      - podtemplates
      - services
      - endpoints
      - events
      - configmaps
      - serviceaccounts
    verbs:
      - create
      - get
      - list
      - update
      - delete
      - watch
  - apiGroups:
      - ""
    resources:
      - resourcequotas
      - secrets
    verbs:
      - list
      - get
      - watch
  - apiGroups:
      - ""
    resources:
      - resourcequotas
    verbs:
      - create
      - get
      - list
      - update
      - delete
      - watch
    resourceNames:
      - calico-critical-pods
      - tigera-critical-pods
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      # Need to update node labels when migrating nodes.
      - get
      - patch
      - list
      # We need this for Typha autoscaling
      - watch
  - apiGroups:
      - rbac.authorization.k8s.io
    resources:
      - clusterroles
      - clusterrolebindings
      - rolebindings
      - roles
    verbs:
      - create
      - get
      - list
      - update
      - delete
      - watch
      - bind
      - escalate
  - apiGroups:
      - apps
    resources:
      - deployments
      - daemonsets
      - statefulsets
    verbs:
      - create
      - get
      - list
      - patch
      - update
      - delete
      - watch
  - apiGroups:
      - apps
    resourceNames:
      - tigera-operator
    resources:
      - deployments/finalizers
    verbs:
      - update
  # The operator needs read and update permissions on the APIs that it controls.
  - apiGroups:
      - operator.tigera.io
    resources:
      # Note: any resources used by the operator within an OwnerReference for resources
      # it creates requires permissions to <resource>/finalizers.
      - apiservers
      - apiservers/finalizers
      - apiservers/status
      - gatewayapis
      - gatewayapis/finalizers
      - gatewayapis/status
      - goldmanes
      - goldmanes/finalizers
      - goldmanes/status
      - imagesets
      - installations
      - installations/finalizers
      - installations/status
      - managementclusterconnections
      - managementclusterconnections/finalizers
      - managementclusterconnections/status
      - tigerastatuses
      - tigerastatuses/status
      - tigerastatuses/finalizers
      - whiskers
      - whiskers/finalizers
      - whiskers/status
    verbs:
      - get
      - list
      - update
      - patch
      - watch
  # In addition to the above, the operator creates and deletes TigeraStatus resources.
  - apiGroups:
      - operator.tigera.io
    resources:
      - tigerastatuses
    verbs:
      - create
      - delete
  # In addition to the above, the operator should have the ability to delete their own resources during uninstallation.
  - apiGroups:
      - operator.tigera.io
    resources:
      - installations
      - apiservers
      - whiskers
      - goldmanes
    verbs:
      - delete
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - create
      - update
      - delete
      - get
      - list
      - watch
  - apiGroups:
      - crd.projectcalico.org
    resources:
      - felixconfigurations
      - ippools
    verbs:
      - create
      - patch
      - list
      - get
      - watch
  - apiGroups:
      - crd.projectcalico.org
    resources:
      - kubecontrollersconfigurations
      - bgpconfigurations
      - clusterinformations
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - projectcalico.org
    resources:
      - ippools
    verbs:
      - create
      - update
      - delete
      - patch
      - get
      - list
      - watch
  - apiGroups:
      - projectcalico.org
    resources:
      - ipamconfigurations
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - scheduling.k8s.io
    resources:
      - priorityclasses
    verbs:
      - create
      - get
      - list
      - update
      - delete
      - watch
  - apiGroups:
      - policy
    resources:
      - poddisruptionbudgets
    verbs:
      - create
      - get
      - list
      - update
      - delete
      - watch
  - apiGroups:
      - apiregistration.k8s.io
    resources:
      - apiservices
    verbs:
      - list
      - watch
      - create
      - update
  - apiGroups:
      - admissionregistration.k8s.io
    resources:
      - mutatingwebhookconfigurations
    verbs:
      - delete
  # Needed for operator lock
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - create
      - get
      - list
      - update
      - delete
      - watch
  - apiGroups:
      - storage.k8s.io
    resources:
      - csidrivers
    verbs:
      - list
      - watch
      - update
      - get
      - create
      - delete
  # Add the permissions to monitor the status of certificate signing requests when certificate management is enabled.
  - apiGroups:
      - certificates.k8s.io
    resources:
      - certificatesigningrequests
    verbs:
      - list
      - watch
  # Add the appropriate pod security policy permissions
  - apiGroups:
      - policy
    resources:
      - podsecuritypolicies
    resourceNames:
      - tigera-operator
    verbs:
      - use
  - apiGroups:
      - policy
    resources:
      - podsecuritypolicies
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
  # For tiered network policy actions, tigera-apiserver requires that we authorize the operator for the tier.networkpolicies and tier.globalnetworkpolicies pseudo-kinds.
  - apiGroups:
      - projectcalico.org
    resourceNames:
      - allow-tigera.*
    resources:
      - tier.networkpolicies
      - tier.globalnetworkpolicies
    verbs:
      - list
      - watch
      - get
      - create
      - update
      - delete
  # For tiered network policy actions, tigera-apiserver requires get authorization on the associated tier.
  - apiGroups:
      - projectcalico.org
    resourceNames:
      - allow-tigera
    resources:
      - tiers
    verbs:
      - get
      - delete
      - update
  # Separated from the above rule since resourceNames does not support the create verb, and requires a field selector for list/watch verbs.
  - apiGroups:
      - projectcalico.org
    resources:
      - tiers
    verbs:
      - create
      - list
      - watch
  # Additions for Gateway API support.
  # 1. The operator needs to reconcile gateway.networking.k8s.io and gateway.envoyproxy.io CRDs.
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - update
    resourceNames:
      - backendlbpolicies.gateway.networking.k8s.io
      - backendtlspolicies.gateway.networking.k8s.io
      - gatewayclasses.gateway.networking.k8s.io
      - gateways.gateway.networking.k8s.io
      - grpcroutes.gateway.networking.k8s.io
      - httproutes.gateway.networking.k8s.io
      - referencegrants.gateway.networking.k8s.io
      - tcproutes.gateway.networking.k8s.io
      - tlsroutes.gateway.networking.k8s.io
      - udproutes.gateway.networking.k8s.io
      - backends.gateway.envoyproxy.io
      - backendtrafficpolicies.gateway.envoyproxy.io
      - clienttrafficpolicies.gateway.envoyproxy.io
      - envoyextensionpolicies.gateway.envoyproxy.io
      - envoypatchpolicies.gateway.envoyproxy.io
      - envoyproxies.gateway.envoyproxy.io
      - httproutefilters.gateway.envoyproxy.io
      - securitypolicies.gateway.envoyproxy.io
  # 2. GatewayClasses and EnvoyProxy configurations.
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - gatewayclasses
    verbs:
      - create
      - update
      - delete
      - list
      - get
      - watch
  - apiGroups:
      - gateway.envoyproxy.io
    resources:
      - envoyproxies
    verbs:
      - create
      - update
      - delete
      - list
      - get
      - watch
  # 3. For Gateway API the operator needs to be able to create and reconcile a certificate
  # generation job.
  - apiGroups:
      - batch
    resources:
      - jobs
    verbs:
      - create
      - list
      - watch
  - apiGroups:
      - batch
    resources:
      - jobs
    verbs:
      - update
    resourceNames:
      - tigera-gateway-api-gateway-helm-certgen
|
||||
---
# Source: tigera-operator/templates/tigera-operator/02-rolebinding-tigera-operator.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tigera-operator
  labels:
    k8s-app: tigera-operator
subjects:
  - kind: ServiceAccount
    name: tigera-operator
    namespace: tigera-operator
roleRef:
  kind: ClusterRole
  name: tigera-operator
  apiGroup: rbac.authorization.k8s.io
---
# Source: tigera-operator/templates/tigera-operator/02-rolebinding-tigera-operator-secrets.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tigera-operator-secrets
  namespace: tigera-operator
  labels:
    k8s-app: tigera-operator
subjects:
  - kind: ServiceAccount
    name: tigera-operator
    namespace: tigera-operator
roleRef:
  kind: ClusterRole
  name: tigera-operator-secrets
  apiGroup: rbac.authorization.k8s.io
|
||||
---
# Source: tigera-operator/templates/tigera-operator/02-tigera-operator.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tigera-operator
  namespace: tigera-operator
  labels:
    k8s-app: tigera-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      name: tigera-operator
  template:
    metadata:
      labels:
        name: tigera-operator
        k8s-app: tigera-operator
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        - effect: NoExecute
          operator: Exists
        - effect: NoSchedule
          operator: Exists
      serviceAccountName: tigera-operator
      # Set the termination grace period to match how long the operator will wait for
      # resources to terminate when being uninstalled.
      terminationGracePeriodSeconds: 60
      hostNetwork: true
      # This must be set when hostNetwork is true or else the cluster services won't resolve
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: tigera-operator
          image: quay.io/tigera/operator:v1.38.3
          imagePullPolicy: IfNotPresent
          command:
            - operator
          args:
            # Configure tigera-operator to manage installation of the necessary CRDs.
            - -manage-crds=true
          volumeMounts:
            - name: var-lib-calico
              readOnly: true
              mountPath: /var/lib/calico
          env:
            - name: WATCH_NAMESPACE
              value: ""
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: "tigera-operator"
            - name: TIGERA_OPERATOR_INIT_IMAGE_VERSION
              value: v1.38.3
          envFrom:
            - configMapRef:
                name: kubernetes-services-endpoint
                optional: true
      volumes:
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
|
||||
@@ -252,7 +252,7 @@ systemd:
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
|
||||
- name: kubeadm-init.service
|
||||
enabled: true
|
||||
contents: |
|
||||
@@ -282,3 +282,34 @@ systemd:
|
||||
RestartSec=120s
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: install-calico.service
|
||||
enabled: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Wants=kubeadm.target
|
||||
After=kubeadm.target
|
||||
[Service]
|
||||
Environment=KUBECONFIG=/etc/kubernetes/admin.conf
|
||||
Environment=DATASTORE_TYPE=kubernetes
|
||||
Environment=PATH=/usr/bin/:/usr/sbin:/opt/bin
|
||||
Type=oneshot
|
||||
StandardOutput=journal+console
|
||||
StandardError=journal+console
|
||||
#ExecStartPre=/bin/sleep 120s
|
||||
ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/namespace.yaml
|
||||
ExecStart=/opt/bin/kubectl create -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/tigera-operator.yaml
|
||||
ExecStart=/bin/sleep 60s
|
||||
ExecStart=/opt/bin/kubectl wait deployment -n tigera-operator tigera-operator --for condition=Available=True --timeout=600s
|
||||
ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico.yaml
|
||||
ExecStart=/bin/sleep 10m
|
||||
ExecStart=/opt/bin/kubectl wait deployment -n calico-apiserver calico-apiserver --for condition=Available=True --timeout=600s
|
||||
ExecStart=/bin/sleep 3m
|
||||
ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/calico-peer.yaml
|
||||
ExecStart=/bin/sleep 2m
|
||||
ExecStart=/opt/bin/kubectl apply -f http://build-node.undercloud.local:3000/admin/undercloud-infrastructure/raw/branch/main/calico-config/ippools.yaml
|
||||
ExecStart=/usr/bin/systemctl disable install-calico.service
|
||||
#RemainAfterExit=true
|
||||
Restart=on-failure
|
||||
RestartSec=120s
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -264,7 +264,7 @@ systemd:
|
||||
ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config
|
||||
ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config
|
||||
|
||||
ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
Restart=on-failure
|
||||
RestartSec=120s
|
||||
|
||||
|
||||
@@ -264,7 +264,7 @@ systemd:
|
||||
ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config
|
||||
ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config
|
||||
|
||||
ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
Restart=on-failure
|
||||
RestartSec=120s
|
||||
|
||||
|
||||
@@ -253,13 +253,8 @@ systemd:
|
||||
ExecStartPre=/bin/sleep 30s
|
||||
|
||||
ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml
|
||||
|
||||
# copy files for kubectl
|
||||
ExecStartPost=/usr/bin/mkdir -p /home/core/.kube
|
||||
ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config
|
||||
ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config
|
||||
|
||||
ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
Restart=on-failure
|
||||
RestartSec=120s
|
||||
|
||||
|
||||
@@ -253,13 +253,8 @@ systemd:
|
||||
ExecStartPre=/bin/sleep 30s
|
||||
|
||||
ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml
|
||||
|
||||
# copy files for kubectl
|
||||
ExecStartPost=/usr/bin/mkdir -p /home/core/.kube
|
||||
ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config
|
||||
ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config
|
||||
|
||||
ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
Restart=on-failure
|
||||
RestartSec=120s
|
||||
|
||||
|
||||
@@ -253,13 +253,8 @@ systemd:
|
||||
ExecStartPre=/bin/sleep 30s
|
||||
|
||||
ExecStart=/opt/bin/kubeadm join --config=/etc/kubernetes/kubeadm-join.yaml
|
||||
|
||||
# copy files for kubectl
|
||||
ExecStartPost=/usr/bin/mkdir -p /home/core/.kube
|
||||
ExecStartPost=/usr/bin/cp -i /etc/kubernetes/admin.conf /home/core/.kube/config
|
||||
ExecStartPost=/usr/bin/chown core:core /home/core/.kube/config
|
||||
|
||||
ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
#ExecStartPost=/usr/bin/systemctl disable kubeadm-init.service
|
||||
Restart=on-failure
|
||||
RestartSec=120s
|
||||
|
||||
|
||||
Reference in New Issue
Block a user