summaryrefslogtreecommitdiff
path: root/roles
diff options
context:
space:
mode:
Diffstat (limited to 'roles')
-rw-r--r--roles/kubernetes/addons/metrics-server/tasks/main.yml11
-rw-r--r--roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2155
-rw-r--r--roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2 (renamed from roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2)225
-rw-r--r--roles/kubernetes/base/tasks/cri_docker.yml2
-rw-r--r--roles/kubernetes/base/tasks/main.yml2
-rw-r--r--roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml4
-rw-r--r--roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j28
-rw-r--r--roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j216
-rw-r--r--roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j212
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/main.yml (renamed from roles/kubernetes/kubeadm/master/tasks/main.yml)43
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml (renamed from roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml)11
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml (renamed from roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml)11
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml (renamed from roles/kubernetes/kubeadm/master/tasks/net_none.yml)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/primary.yml (renamed from roles/kubernetes/kubeadm/master/tasks/primary-master.yml)24
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml (renamed from roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml)12
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 (renamed from roles/kubernetes/kubeadm/master/templates/encryption-config.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 (renamed from roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2)12
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/prune/tasks/main.yml2
-rw-r--r--roles/kubernetes/kubeadm/upgrade12
-rw-r--r--roles/kubernetes/kubeadm/worker/tasks/main.yml (renamed from roles/kubernetes/kubeadm/node/tasks/main.yml)4
26 files changed, 238 insertions, 328 deletions
diff --git a/roles/kubernetes/addons/metrics-server/tasks/main.yml b/roles/kubernetes/addons/metrics-server/tasks/main.yml
index 5236e4e3..87c57346 100644
--- a/roles/kubernetes/addons/metrics-server/tasks/main.yml
+++ b/roles/kubernetes/addons/metrics-server/tasks/main.yml
@@ -9,8 +9,13 @@
src: "components.{{ kubernetes_metrics_server_version }}.yml.j2"
dest: /etc/kubernetes/addons/metrics-server/config.yml
- ## TODO: move to server-side apply (GA since 1.22)
+- name: check if metrics-server is already installed
+ check_mode: no
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/addons/metrics-server/config.yml
+ failed_when: false
+ changed_when: false
+ register: kube_metrics_server_diff_result
+
- name: install metrics-server onto the cluster
+ when: kube_metrics_server_diff_result.rc != 0
command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/addons/metrics-server/config.yml
- register: kube_metrics_server_apply_result
- changed_when: (kube_metrics_server_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2
deleted file mode 100644
index fc8d287b..00000000
--- a/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2
+++ /dev/null
@@ -1,155 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:aggregated-metrics-reader
- labels:
- rbac.authorization.k8s.io/aggregate-to-view: "true"
- rbac.authorization.k8s.io/aggregate-to-edit: "true"
- rbac.authorization.k8s.io/aggregate-to-admin: "true"
-rules:
-- apiGroups: ["metrics.k8s.io"]
- resources: ["pods", "nodes"]
- verbs: ["get", "list", "watch"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: metrics-server:system:auth-delegator
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:auth-delegator
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: metrics-server-auth-reader
- namespace: kube-system
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: extension-apiserver-authentication-reader
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
----
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
- name: v1beta1.metrics.k8s.io
-spec:
- service:
- name: metrics-server
- namespace: kube-system
- group: metrics.k8s.io
- version: v1beta1
- insecureSkipTLSVerify: true
- groupPriorityMinimum: 100
- versionPriority: 100
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: metrics-server
- namespace: kube-system
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: metrics-server
- namespace: kube-system
- labels:
- k8s-app: metrics-server
-spec:
- selector:
- matchLabels:
- k8s-app: metrics-server
- template:
- metadata:
- name: metrics-server
- labels:
- k8s-app: metrics-server
- spec:
- serviceAccountName: metrics-server
- volumes:
- # mount in tmp so we can safely use from-scratch images and/or read-only containers
- - name: tmp-dir
- emptyDir: {}
- containers:
- - name: metrics-server
- image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7
- imagePullPolicy: IfNotPresent
- args:
- - --cert-dir=/tmp
- - --secure-port=4443
- - --kubelet-insecure-tls
- - --kubelet-preferred-address-types=InternalIP,ExternalIP
- ports:
- - name: main-port
- containerPort: 4443
- protocol: TCP
- securityContext:
- readOnlyRootFilesystem: true
- runAsNonRoot: true
- runAsUser: 1000
- volumeMounts:
- - name: tmp-dir
- mountPath: /tmp
- nodeSelector:
- kubernetes.io/os: linux
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
----
-apiVersion: v1
-kind: Service
-metadata:
- name: metrics-server
- namespace: kube-system
- labels:
- kubernetes.io/name: "Metrics-server"
- kubernetes.io/cluster-service: "true"
-spec:
- selector:
- k8s-app: metrics-server
- ports:
- - port: 443
- protocol: TCP
- targetPort: main-port
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:metrics-server
-rules:
-- apiGroups:
- - ""
- resources:
- - pods
- - nodes
- - nodes/stats
- - namespaces
- - configmaps
- verbs:
- - get
- - list
- - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: system:metrics-server
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:metrics-server
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2
index 1e3789bb..7b427254 100644
--- a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2
+++ b/roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2
@@ -1,20 +1,75 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: system:aggregated-metrics-reader
labels:
- rbac.authorization.k8s.io/aggregate-to-view: "true"
- rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ name: system:aggregated-metrics-reader
rules:
-- apiGroups: ["metrics.k8s.io"]
- resources: ["pods", "nodes"]
- verbs: ["get", "list", "watch"]
+- apiGroups:
+ - metrics.k8s.io
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes/metrics
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
+ labels:
+ k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -26,131 +81,117 @@ subjects:
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
+kind: ClusterRoleBinding
metadata:
- name: metrics-server-auth-reader
- namespace: kube-system
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: extension-apiserver-authentication-reader
+ kind: ClusterRole
+ name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
- name: v1beta1.metrics.k8s.io
-spec:
- service:
- name: metrics-server
- namespace: kube-system
- group: metrics.k8s.io
- version: v1beta1
- insecureSkipTLSVerify: true
- groupPriorityMinimum: 100
- versionPriority: 100
----
apiVersion: v1
-kind: ServiceAccount
+kind: Service
metadata:
+ labels:
+ k8s-app: metrics-server
name: metrics-server
namespace: kube-system
+spec:
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
- name: metrics-server
- namespace: kube-system
labels:
k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 0
template:
metadata:
- name: metrics-server
labels:
k8s-app: metrics-server
spec:
- serviceAccountName: metrics-server
- volumes:
- # mount in tmp so we can safely use from-scratch images and/or read-only containers
- - name: tmp-dir
- emptyDir: {}
containers:
- - name: metrics-server
- image: k8s.gcr.io/metrics-server-amd64:v0.3.6
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-insecure-tls
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --metric-resolution=15s
+ image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
- args:
- - --cert-dir=/tmp
- - --secure-port=4443
- - --kubelet-insecure-tls
- - --kubelet-preferred-address-types=InternalIP,ExternalIP
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /livez
+ port: https
+ scheme: HTTPS
+ periodSeconds: 10
+ name: metrics-server
ports:
- - name: main-port
- containerPort: 4443
+ - containerPort: 4443
+ name: https
protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /readyz
+ port: https
+ scheme: HTTPS
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
securityContext:
+ allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- - name: tmp-dir
- mountPath: /tmp
+ - mountPath: /tmp
+ name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
- kubernetes.io/arch: "amd64"
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
+ priorityClassName: system-cluster-critical
+ serviceAccountName: metrics-server
+ volumes:
+ - emptyDir: {}
+ name: tmp-dir
---
-apiVersion: v1
-kind: Service
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
metadata:
- name: metrics-server
- namespace: kube-system
labels:
- kubernetes.io/name: "Metrics-server"
- kubernetes.io/cluster-service: "true"
-spec:
- selector:
k8s-app: metrics-server
- ports:
- - port: 443
- protocol: TCP
- targetPort: main-port
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:metrics-server
-rules:
-- apiGroups:
- - ""
- resources:
- - pods
- - nodes
- - nodes/stats
- - namespaces
- - configmaps
- verbs:
- - get
- - list
- - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: system:metrics-server
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:metrics-server
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
+ name: v1beta1.metrics.k8s.io
+spec:
+ group: metrics.k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: metrics-server
+ namespace: kube-system
+ version: v1beta1
+ versionPriority: 100
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
index c9598638..626395b7 100644
--- a/roles/kubernetes/base/tasks/cri_docker.yml
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -10,7 +10,7 @@
path: /etc/systemd/system/kubelet.service.d/
state: directory
-- name: install systemd snippet to make sure kubelet starts after docker
+- name: install systemd snippet to make sure kubelet starts after cri-dockerd
copy:
content: |
[Unit]
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 4ff976a1..d2f7ef81 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -1,5 +1,5 @@
---
-- name: check if prometheus apt component of spreadspace repo is enabled
+- name: check if container apt component of spreadspace repo is enabled
assert:
msg: "please enable the 'container' component of spreadspace repo using 'spreadspace_apt_repo_components'"
that:
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
index 40cee3b7..350ecdee 100644
--- a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
@@ -87,8 +87,8 @@
- name: install cni config
template:
- src: net_kubeguard/cni.json.j2
- dest: /etc/cni/net.d/kubeguard.conf
+ src: net_kubeguard/cni.conflist.j2
+ dest: /etc/cni/net.d/kubeguard.conflist
- name: install packages needed for debugging kube-router
when: kubernetes_network_plugin_variant == 'with-kube-router'
diff --git a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
index 2e0eaf5d..19118b2e 100644
--- a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
+++ b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
@@ -16,7 +16,7 @@ defaults
option dontlog-normal
frontend kube_api
-{% if '_kubernetes_masters_' in group_names %}
+{% if '_kubernetes_controlplane_nodes_' in group_names %}
bind *:6443
{% else %}
bind 127.0.0.1:6443
@@ -25,7 +25,7 @@ frontend kube_api
default_backend kube_api
backend kube_api
-{% if '_kubernetes_masters_' in group_names %}
+{% if '_kubernetes_controlplane_nodes_' in group_names %}
balance first
{% else %}
balance roundrobin
@@ -36,6 +36,6 @@ backend kube_api
default-server inter 5s fall 3 rise 2
timeout connect 5s
timeout server 3h
-{% for master in groups['_kubernetes_masters_'] %}
- server {{ master }} {{ hostvars[master].kubernetes_overlay_node_ip | default(hostvars[master].ansible_default_ipv4.address) }}:6442 {% if master == inventory_hostname %}id 1{% endif %} check check-ssl verify none
+{% for node in groups['_kubernetes_controlplane_nodes_'] %}
+ server {{ node }} {{ hostvars[node].kubernetes_overlay_node_ip | default(hostvars[node].ansible_default_ipv4.address) }}:6442 {% if node == inventory_hostname %}id 1{% endif %} check check-ssl verify none
{% endfor %}
diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2
new file mode 100644
index 00000000..240d86ef
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2
@@ -0,0 +1,16 @@
+{
+ "cniVersion": "0.3.1",
+ "name": "kubeguard",
+ "plugins": [
+ {
+ "type": "bridge",
+ "bridge": "kubeguard-br0",
+ "isDefaultGateway": true,
+ "hairpinMode": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
+ }
+ }
+ ]
+}
diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2
deleted file mode 100644
index eb9e3d61..00000000
--- a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "cniVersion": "0.3.1",
- "name": "kubeguard",
- "type": "bridge",
- "bridge": "kubeguard-br0",
- "isDefaultGateway": true,
- "hairpinMode": true,
- "ipam": {
- "type": "host-local",
- "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
- }
-}
diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml
index 04df760f..d5bd378e 100644
--- a/roles/kubernetes/kubeadm/master/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml
@@ -12,48 +12,47 @@
mode: 0600
-- name: install primary master
- include_tasks: primary-master.yml
- when: "'_kubernetes_primary_master_' in group_names"
+- name: install primary control-plane node
+ include_tasks: primary.yml
+ when: "'_kubernetes_primary_controlplane_node_' in group_names"
-- name: install secondary masters
- include_tasks: secondary-masters.yml
- when: "'_kubernetes_primary_master_' not in group_names"
+- name: install secondary control-plane nodes
+ include_tasks: secondary.yml
+ when: "'_kubernetes_primary_controlplane_node_' not in group_names"
-- name: check if master is tainted (1/2)
+- name: check if control-plane node is tainted (1/2)
command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
check_mode: no
register: kubectl_get_node
changed_when: False
-- name: check if master is tainted (2/2)
+- name: check if control-plane node is tainted (2/2)
set_fact:
kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
-- name: remove taint from master/control-plane node
- when: not kubernetes.dedicated_master
+- name: remove taint from control-plane node
+ when: not kubernetes.dedicated_controlplane_nodes
block:
- - name: remove master taint from node
- when: "'node-role.kubernetes.io/master' in kube_node_taints"
- command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
-
- name: remove control-plane taint from node
when: "'node-role.kubernetes.io/control-plane' in kube_node_taints"
command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane-"
-- name: add taint from master/control-plane node
- when: kubernetes.dedicated_master
+ - name: remove deprecated master taint from node
+ when: "'node-role.kubernetes.io/master' in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
+
+- name: add taint from control-plane node
+ when: kubernetes.dedicated_controlplane_nodes
block:
- - name: add master taint from node
+ - name: add control-plane taint to node
+ when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule"
+
+ - name: add deprecated master taint to node
when: "'node-role.kubernetes.io/master' not in kube_node_taints"
command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
- ## TODO: enable this once all needed addons and workloads have tolerations set accordingly
- # - name: add control-plane taint from node
- # when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints"
- # command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule"
-
- name: prepare kubectl (1/2)
file:
name: /root/.kube
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
index 0a216414..4584e583 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
@@ -4,8 +4,13 @@
src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2"
dest: /etc/kubernetes/network-plugin.yml
- ## TODO: move to server-side apply (GA since 1.22)
+- name: check if kube-router is already installed
+ check_mode: no
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml
+ failed_when: false
+ changed_when: false
+ register: kube_router_diff_result
+
- name: install kube-router on to the cluster
+ when: kube_router_diff_result.rc != 0
command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
- register: kube_router_apply_result
- changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
index a572ca89..66dac49b 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
@@ -7,8 +7,13 @@
src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2"
dest: /etc/kubernetes/network-plugin.yml
- ## TODO: move to server-side apply (GA since 1.22)
+ - name: check if kubeguard (kube-router) is already installed
+ check_mode: no
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml
+ failed_when: false
+ changed_when: false
+ register: kubeguard_diff_result
+
- name: install kubeguard (kube-router) on to the cluster
+ when: kubeguard_diff_result.rc != 0
command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
- register: kubeguard_apply_result
- changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_none.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
index bf1a16d5..bf1a16d5 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_none.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
index 6fb63d09..65a6f7c8 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
@@ -4,7 +4,6 @@
path: /etc/kubernetes/kubelet.conf
register: kubeconfig_kubelet_stats
- ## TODO: switch to kubeadm config version v1beta3 (available since 1.22)
- name: generate kubeadm.config
template:
src: kubeadm.config.j2
@@ -25,11 +24,11 @@
# check_mode: no
# register: kubeadm_token_generate
- - name: initialize kubernetes master and store log
+ - name: initialize kubernetes primary control-plane node and store log
block:
- - name: initialize kubernetes master
- command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
- # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+ - name: initialize kubernetes primary control-plane node
+ command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }} --skip-token-print"
+ # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
args:
creates: /etc/kubernetes/pki/ca.crt
register: kubeadm_init
@@ -47,7 +46,7 @@
content: "{{ kubeadm_init.stderr }}\n"
dest: /etc/kubernetes/kubeadm-init.errors
- - name: create bootstrap token for existing cluster
+ - name: create bootstrap token for new cluster
command: kubeadm token create --ttl 42m
check_mode: no
register: kubeadm_token_generate
@@ -118,11 +117,16 @@
src: node-local-dns.yml.j2
dest: /etc/kubernetes/node-local-dns.yml
- ## TODO: move to server-side apply (GA since 1.22)
-- name: install node-local dns cache
+- name: check if node-local dns cache is already installed
+ check_mode: no
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/node-local-dns.yml
+ failed_when: false
+ changed_when: false
+ register: kube_node_local_dns_diff_result
+
+- name: install node-local dns cache
+ when: kube_node_local_dns_diff_result.rc != 0
command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml
- register: kube_node_local_dns_apply_result
- changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
## Network Plugin
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml
index 4759b7fd..a2dbe081 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml
@@ -1,7 +1,7 @@
---
-- name: fetch secrets needed for secondary master
+- name: fetch secrets needed for secondary control-plane node
run_once: true
- delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+ delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}"
block:
- name: fetch list of current nodes
@@ -15,7 +15,7 @@
kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
- name: upload certs
- when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0"
+ when: "groups['_kubernetes_controlplane_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
command: kubeadm init phase upload-certs --upload-certs
check_mode: no
register: kubeadm_upload_certs
@@ -25,9 +25,9 @@
set_fact:
kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
-- name: join kubernetes secondary master node and store log
+- name: join kubernetes secondary control-plane node and store log
block:
- - name: join kubernetes secondary master node
+ - name: join kubernetes secondary control-plane node
throttle: 1
command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
args:
@@ -49,7 +49,7 @@
dest: /etc/kubernetes/kubeadm-join.errors
# TODO: actually check if node has registered
-- name: give the new master(s) a moment to register
+- name: give the new control-plane node(s) a moment to register
when: kubeadm_join is changed
pause: # noqa 503
seconds: 5
diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2
index 345c9bf9..345c9bf9 100644
--- a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2
index 2fa98ed6..a0f3efe7 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2
@@ -1,6 +1,6 @@
-{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3 #}
{# #}
-apiVersion: kubeadm.k8s.io/v1beta2
+apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
{# TODO: this is ugly but we want to create our own token so we can #}
{# better control it's lifetime #}
@@ -11,10 +11,14 @@ localAPIEndpoint:
{% if kubernetes_overlay_node_ip is defined %}
advertiseAddress: {{ kubernetes_overlay_node_ip }}
{% endif %}
+{% if kubernetes_network_plugin_replaces_kube_proxy %}
+skipPhases:
+- addon/kube-proxy
+{% endif %}
nodeRegistration:
criSocket: {{ kubernetes_cri_socket }}
---
-apiVersion: kubeadm.k8s.io/v1beta2
+apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: {{ kubernetes_version }}
clusterName: {{ kubernetes.cluster_name }}
@@ -43,8 +47,6 @@ controllerManager:
extraArgs:
node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}"
scheduler: {}
-dns:
- type: CoreDNS
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2
index a2660db2..a2660db2 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2
index 382164cb..382164cb 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2
index 382164cb..382164cb 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
index e343f4a7..e343f4a7 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
index ec30d670..ec30d670 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
index d536d5a7..d536d5a7 100644
--- a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
diff --git a/roles/kubernetes/kubeadm/prune/tasks/main.yml b/roles/kubernetes/kubeadm/prune/tasks/main.yml
index 71ed0d04..45020963 100644
--- a/roles/kubernetes/kubeadm/prune/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/prune/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: remove nodes from api server
run_once: true
- delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+ delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}"
loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
command: "kubectl delete node {{ item }}"
diff --git a/roles/kubernetes/kubeadm/upgrade b/roles/kubernetes/kubeadm/upgrade
index c2f97d40..2cfa18cd 100644
--- a/roles/kubernetes/kubeadm/upgrade
+++ b/roles/kubernetes/kubeadm/upgrade
@@ -1,8 +1,8 @@
Cluster Upgrades:
=================
-primary master:
----------------
+primary control-plane node:
+---------------------------
VERSION=1.23.1
@@ -26,8 +26,8 @@ apt-get update && apt-get install -y "kubelet=$VERSION-00" "kubectl=$VERSION-00"
kubectl uncordon $(hostname)
-secondary master:
------------------
+secondary control-plane node:
+-----------------------------
VERSION=1.23.1
@@ -55,7 +55,7 @@ apt-get update
sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubeadm.pref
apt-get install -y "kubeadm=$VERSION-00"
-@primary master: kubectl drain <node> --ignore-daemonsets --delete-emptydir-data
+@primary control-plane node: kubectl drain <node> --ignore-daemonsets --delete-emptydir-data
kubeadm upgrade node
sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubelet.pref
@@ -64,4 +64,4 @@ apt-get update && apt-get install -y kubelet="$VERSION-00" "kubectl=$VERSION-00"
// security updates + reboot ?
-@primary master: kubectl uncordon <node>
+@primary control-plane node: kubectl uncordon <node>
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/worker/tasks/main.yml
index 13937bcf..eabb7a1f 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/worker/tasks/main.yml
@@ -1,7 +1,7 @@
---
-- name: join kubernetes node and store log
+- name: join kubernetes worker node and store log
block:
- - name: join kubernetes node
+ - name: join kubernetes worker node
command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
args:
creates: /etc/kubernetes/kubelet.conf