author     Christian Pointner <equinox@spreadspace.org>   2023-05-06 18:16:58 +0200
committer  Christian Pointner <equinox@spreadspace.org>   2023-05-06 18:16:58 +0200
commit     abe295da0f0acf4a3c9e0035fe56197dbd482e2f (patch)
tree       b740610b732c12e63c657d2d414e61e01a5850ad /roles/kubernetes/kubeadm
parent     prepare hosts for k8s-chtest cluster (diff)
k8s-chtest: upgrade cluster to 1.27.1
Diffstat (limited to 'roles/kubernetes/kubeadm')
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/main.yml                                             2
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/tasks/primary.yml                                 1
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2                  4
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.5.4.yml.j2   236
-rw-r--r--  roles/kubernetes/kubeadm/reset/tasks/main.yml                                            3
5 files changed, 240 insertions, 6 deletions
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 3506dce9..39e24ccb 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -14,7 +14,7 @@
   apt:
     name:
     - haproxy
-    - haproxyctl
+    - hatop
     - "kubeadm={{ kubernetes_version }}-00"
     - "kubectl={{ kubernetes_version }}-00"
     state: present
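
Note: a hedged sketch of the inventory value this upgrade implies. The variable name comes from the apt task above; the value is an assumption based on the commit message "upgrade cluster to 1.27.1".

    # group_vars sketch (assumption): pin the kubeadm/kubectl packages for this upgrade
    kubernetes_version: 1.27.1
    # the apt task above then installs kubeadm=1.27.1-00 and kubectl=1.27.1-00 (plus haproxy and hatop)
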
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
index 2c16d406..36195235 100644
--- a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
@@ -19,7 +19,6 @@
 - name: initialize kubernetes primary control-plane node and store log
   block:
   - name: initialize kubernetes primary control-plane node
-    ## TODO: kubeadm 1.26+: add cli option '--skip-phases=show-join-command'
     command: "kubeadm init --config /etc/kubernetes/kubeadm.config --skip-token-print"
     args:
       creates: /etc/kubernetes/pki/ca.crt
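
The removed TODO is resolved by moving the phase skip into the kubeadm config template (next hunk) rather than onto the command line. For reference only, a roughly equivalent CLI-flags variant of the task would look like the sketch below; this is not what the role runs.

    # sketch: same effect via kubeadm init flags instead of the config file's skipPhases list
    - name: initialize kubernetes primary control-plane node (CLI-flags variant, sketch only)
      command: "kubeadm init --config /etc/kubernetes/kubeadm.config --skip-token-print --skip-phases=show-join-command"
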
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2
index 390c1b15..9dfd6825 100644
--- a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2
@@ -11,10 +11,10 @@ localAPIEndpoint:
 {% if kubernetes_overlay_node_ip is defined %}
   advertiseAddress: "{{ kubernetes_overlay_node_ip }}"
 {% endif %}
-{% if kubernetes_network_plugin_replaces_kube_proxy %}
 skipPhases:
+- show-join-command
+{% if kubernetes_network_plugin_replaces_kube_proxy %}
 - addon/kube-proxy
-{#- show-join-command can be enable for 1.26+ #}
 {% endif %}
 nodeRegistration:
   name: "{{ kubernetes_node_name }}"
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.5.4.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.5.4.yml.j2
new file mode 100644
index 00000000..382164cb
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.5.4.yml.j2
@@ -0,0 +1,236 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-router-kubeconfig
+  namespace: kube-system
+  labels:
+    tier: node
+    k8s-app: kube-router
+data:
+  kubeconfig.conf: |
+    apiVersion: v1
+    kind: Config
+    clusters:
+    - cluster:
+        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+      name: default
+    contexts:
+    - context:
+        cluster: default
+        namespace: default
+        user: default
+      name: default
+    current-context: default
+    users:
+    - name: default
+      user:
+        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-router-cfg
+  namespace: kube-system
+  labels:
+    tier: node
+    k8s-app: kube-router
+data:
+  cni-conf.json: |
+    {
+       "cniVersion":"0.3.0",
+       "name":"mynet",
+       "plugins":[
+          {
+             "name":"kubernetes",
+             "type":"bridge",
+             "bridge":"kube-bridge",
+             "isDefaultGateway":true,
+             "hairpinMode": true,
+             "ipam":{
+                "type":"host-local"
+             }
+          },
+          {
+             "type":"portmap",
+             "capabilities":{
+                "snat":true,
+                "portMappings":true
+             }
+          }
+       ]
+    }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: kube-router
+    tier: node
+  name: kube-router
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      k8s-app: kube-router
+      tier: node
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-router
+        tier: node
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "8080"
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccountName: kube-router
+      serviceAccount: kube-router
+      containers:
+      - name: kube-router
+        image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+        imagePullPolicy: Always
+        args:
+        - --run-router=true
+        - --run-firewall=true
+        - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+        - --bgp-graceful-restart=true
+        - --kubeconfig=/var/lib/kube-router/kubeconfig
+        - --hairpin-mode
+        - --iptables-sync-period=10s
+        - --ipvs-sync-period=10s
+        - --routes-sync-period=10s
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: KUBE_ROUTER_CNI_CONF_FILE
+          value: /etc/cni/net.d/10-kuberouter.conflist
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 20244
+          initialDelaySeconds: 10
+          periodSeconds: 3
+        resources:
+          requests:
+            cpu: 250m
+            memory: 250Mi
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: lib-modules
+          mountPath: /lib/modules
+          readOnly: true
+        - name: cni-conf-dir
+          mountPath: /etc/cni/net.d
+        - name: kubeconfig
+          mountPath: /var/lib/kube-router
+          readOnly: true
+        - name: xtables-lock
+          mountPath: /run/xtables.lock
+          readOnly: false
+      initContainers:
+      - name: install-cni
+        image: busybox
+        imagePullPolicy: Always
+        command:
+        - /bin/sh
+        - -c
+        - set -e -x;
+          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
+            if [ -f /etc/cni/net.d/*.conf ]; then
+              rm -f /etc/cni/net.d/*.conf;
+            fi;
+            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
+            cp /etc/kube-router/cni-conf.json ${TMP};
+            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
+          fi
+        volumeMounts:
+        - name: cni-conf-dir
+          mountPath: /etc/cni/net.d
+        - name: kube-router-cfg
+          mountPath: /etc/kube-router
+      hostNetwork: true
+      tolerations:
+      - effect: NoSchedule
+        operator: Exists
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - effect: NoExecute
+        operator: Exists
+      volumes:
+      - name: lib-modules
+        hostPath:
+          path: /lib/modules
+      - name: cni-conf-dir
+        hostPath:
+          path: /etc/cni/net.d
+      - name: kube-router-cfg
+        configMap:
+          name: kube-router-cfg
+      - name: kubeconfig
+        configMap:
+          name: kube-router-kubeconfig
+          items:
+          - key: kubeconfig.conf
+            path: kubeconfig
+      - name: xtables-lock
+        hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-router
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-router
+  namespace: kube-system
+rules:
+  - apiGroups:
+    - ""
+    resources:
+    - namespaces
+    - pods
+    - services
+    - nodes
+    - endpoints
+    verbs:
+    - list
+    - get
+    - watch
+  - apiGroups:
+    - "networking.k8s.io"
+    resources:
+    - networkpolicies
+    verbs:
+    - list
+    - get
+    - watch
+  - apiGroups:
+    - extensions
+    resources:
+    - networkpolicies
+    verbs:
+    - get
+    - list
+    - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-router
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kube-router
+subjects:
+- kind: ServiceAccount
+  name: kube-router
+  namespace: kube-system
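
How the role applies this new template is not part of this diff. A minimal sketch, assuming the control-plane tasks render the manifest by plugin version and feed it to kubectl; the task names, destination path, and kubeconfig path are assumptions, not the role's actual code.

    # sketch: render the versioned kube-router manifest and apply it on the primary control-plane node
    - name: render kube-router network plugin manifest
      template:
        src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2"
        dest: /etc/kubernetes/network-plugin.yml

    - name: apply kube-router network plugin manifest
      command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml

With kubernetes_network_plugin_version set to 1.5.4 (matching the template's file name), the DaemonSet above pulls docker.io/cloudnativelabs/kube-router:v1.5.4.
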
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index 68f9d895..d89dd32f 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -1,7 +1,6 @@
 ---
 - name: clean up settings and files created by kubeadm
-  ## TODO: kubeadm 1.26+: add CLI option '--cleanup-tmp-dir'?
-  command: kubeadm reset -f
+  command: kubeadm reset -f --cleanup-tmp-dir

 - name: clean up extra configs and logs
   loop:
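
The reset task now resolves its TODO by passing --cleanup-tmp-dir, which is available since kubeadm 1.26 and additionally removes kubeadm's temporary directory (/etc/kubernetes/tmp). A standalone sketch of the equivalent task, without the rest of the role's cleanup logic:

    # sketch: reset kubeadm state on a node, including kubeadm's temporary directory
    - name: reset kubeadm state (standalone sketch)
      command: kubeadm reset -f --cleanup-tmp-dir
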