summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--chaos-at-home/k8s-chtest.yml6
-rw-r--r--common/kubernetes-cluster-cleanup.yml2
-rw-r--r--common/kubernetes-cluster-layout.yml30
-rw-r--r--common/kubernetes-cluster.yml18
-rw-r--r--dan/emc-ctrl.yml (renamed from dan/emc-master.yml)2
-rw-r--r--dan/k8s-emc.yml6
-rw-r--r--inventory/group_vars/chaos-at-home/network.yml2
-rw-r--r--inventory/group_vars/k8s-chtest/vars.yml6
-rw-r--r--inventory/group_vars/k8s-emc/vars.yml6
-rw-r--r--inventory/group_vars/vmhost-sk-2019vm/vars.yml6
-rw-r--r--inventory/host_vars/ch-dione.yml33
-rw-r--r--inventory/host_vars/ch-helene.yml33
-rw-r--r--inventory/host_vars/ch-k8s-ctrl.yml (renamed from inventory/host_vars/ch-k8s-master.yml)0
-rw-r--r--inventory/host_vars/emc-ctrl.yml (renamed from inventory/host_vars/emc-master.yml)0
-rw-r--r--inventory/hosts.ini22
-rw-r--r--roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2156
-rw-r--r--roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2155
-rw-r--r--roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j28
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/main.yml (renamed from roles/kubernetes/kubeadm/master/tasks/main.yml)43
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml (renamed from roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml (renamed from roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml (renamed from roles/kubernetes/kubeadm/master/tasks/net_none.yml)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/primary.yml (renamed from roles/kubernetes/kubeadm/master/tasks/primary-master.yml)4
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml (renamed from roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml)12
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 (renamed from roles/kubernetes/kubeadm/master/templates/encryption-config.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 (renamed from roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 (renamed from roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2)0
-rw-r--r--roles/kubernetes/kubeadm/prune/tasks/main.yml2
-rw-r--r--roles/kubernetes/kubeadm/upgrade12
-rw-r--r--roles/kubernetes/kubeadm/worker/tasks/main.yml (renamed from roles/kubernetes/kubeadm/node/tasks/main.yml)4
35 files changed, 134 insertions, 434 deletions
diff --git a/chaos-at-home/k8s-chtest.yml b/chaos-at-home/k8s-chtest.yml
index 5f7c830e..01ba7db8 100644
--- a/chaos-at-home/k8s-chtest.yml
+++ b/chaos-at-home/k8s-chtest.yml
@@ -12,8 +12,8 @@
vars:
kubernetes_cluster_layout:
nodes_group: k8s-chtest
- masters:
- - ch-k8s-master
+ controlplane_nodes:
+ - ch-k8s-ctrl
### hack hack hack...
- name: cook kubernetes secrets
@@ -30,6 +30,6 @@
- import_playbook: ../common/kubernetes-cluster-cleanup.yml
- name: install addons
- hosts: _kubernetes_primary_master_
+ hosts: _kubernetes_primary_controlplane_node_
roles:
- role: kubernetes/addons/metrics-server
diff --git a/common/kubernetes-cluster-cleanup.yml b/common/kubernetes-cluster-cleanup.yml
index 5647e3d6..87e59d31 100644
--- a/common/kubernetes-cluster-cleanup.yml
+++ b/common/kubernetes-cluster-cleanup.yml
@@ -1,6 +1,6 @@
---
- name: check for nodes to be removed
- hosts: _kubernetes_primary_master_
+ hosts: _kubernetes_primary_controlplane_node_
tasks:
- name: fetch list of current nodes
command: kubectl get nodes -o name
diff --git a/common/kubernetes-cluster-layout.yml b/common/kubernetes-cluster-layout.yml
index bd972a6d..31751a8d 100644
--- a/common/kubernetes-cluster-layout.yml
+++ b/common/kubernetes-cluster-layout.yml
@@ -5,20 +5,20 @@
gather_facts: no
run_once: yes
tasks:
- - name: sanity check - fail if masters are not included in nodes
+ - name: sanity check - fail if control-plane nodes are not included in node group
assert:
- msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (master and non-master)"
- that: kubernetes_cluster_layout.masters | difference(ansible_play_hosts_all) | length == 0
+ msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (control-plane and worker)"
+ that: kubernetes_cluster_layout.controlplane_nodes | difference(ansible_play_hosts_all) | length == 0
- - name: sanity check - fail if primary master is not in masters
+ - name: sanity check - fail if primary control-plane node is not in control-plane node list
assert:
- msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
- that: kubernetes_cluster_layout.primary_master is undefined or kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters
+ msg: "kubernetes_cluster_layout.controlplane_nodes must include kubernetes_cluster_layout.primary_controlplane_node"
+ that: kubernetes_cluster_layout.primary_controlplane_node is undefined or kubernetes_cluster_layout.primary_controlplane_node in kubernetes_cluster_layout.controlplane_nodes
- - name: sanity check - fail on multiple masters if no primary master is configured
+ - name: sanity check - fail on multiple control-plane nodes if no primary is configured
assert:
- msg: "for multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
- that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
+ msg: "for multiple control-plane nodes to work you need to define kubernetes_cluster_layout.primary_controlplane_node"
+ that: (kubernetes_cluster_layout.controlplane_nodes | length) == 1 or kubernetes_cluster_layout.primary_controlplane_node is defined
- name: create group for all kubernetes nodes
loop: "{{ ansible_play_hosts_all }}"
@@ -28,19 +28,19 @@
group: _kubernetes_nodes_
changed_when: False
- - name: create group for kubernetes master nodes
- loop: "{{ kubernetes_cluster_layout.masters }}"
+ - name: create group for kubernetes control-plane nodes
+ loop: "{{ kubernetes_cluster_layout.controlplane_nodes }}"
add_host:
name: "{{ item }}"
inventory_dir: "{{ hostvars[item].inventory_dir }}"
- group: _kubernetes_masters_
+ group: _kubernetes_controlplane_nodes_
changed_when: False
- - name: create group for kubernetes primary master
+ - name: create group for kubernetes primary control-plane node
vars:
- item: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
+ item: "{{ kubernetes_cluster_layout.primary_controlplane_node | default(kubernetes_cluster_layout.controlplane_nodes[0]) }}"
add_host:
name: "{{ item }}"
inventory_dir: "{{ hostvars[item].inventory_dir }}"
- group: _kubernetes_primary_master_
+ group: _kubernetes_primary_controlplane_node_
changed_when: False
diff --git a/common/kubernetes-cluster.yml b/common/kubernetes-cluster.yml
index 4ee91dd3..6958db15 100644
--- a/common/kubernetes-cluster.yml
+++ b/common/kubernetes-cluster.yml
@@ -40,19 +40,19 @@
- role: kubernetes/base
- role: kubernetes/kubeadm/base
-- name: configure primary kubernetes master
- hosts: _kubernetes_primary_master_
+- name: configure primary kubernetes control-plane node
+ hosts: _kubernetes_primary_controlplane_node_
roles:
- - role: kubernetes/kubeadm/master
+ - role: kubernetes/kubeadm/control-plane
-- name: configure secondary kubernetes masters
- hosts: _kubernetes_masters_:!_kubernetes_primary_master_
+- name: configure secondary kubernetes control-plane nodes
+ hosts: _kubernetes_controlplane_nodes_:!_kubernetes_primary_controlplane_node_
roles:
- - role: kubernetes/kubeadm/master
+ - role: kubernetes/kubeadm/control-plane
-- name: configure kubernetes non-master nodes
- hosts: _kubernetes_nodes_:!_kubernetes_masters_
+- name: configure kubernetes worker nodes
+ hosts: _kubernetes_nodes_:!_kubernetes_controlplane_nodes_
roles:
- - role: kubernetes/kubeadm/node
+ - role: kubernetes/kubeadm/worker
### TODO: add node labels (ie. for ingress daeomnset)
diff --git a/dan/emc-master.yml b/dan/emc-ctrl.yml
index edfc0ffe..285faaaa 100644
--- a/dan/emc-master.yml
+++ b/dan/emc-ctrl.yml
@@ -1,6 +1,6 @@
---
- name: Basic Setup
- hosts: emc-master
+ hosts: emc-ctrl
roles:
- role: apt-repo/base
- role: core/base
diff --git a/dan/k8s-emc.yml b/dan/k8s-emc.yml
index 468919ef..75e81c8b 100644
--- a/dan/k8s-emc.yml
+++ b/dan/k8s-emc.yml
@@ -14,8 +14,8 @@
vars:
kubernetes_cluster_layout:
nodes_group: k8s-emc
- masters:
- - emc-master
+ controlplane_nodes:
+ - emc-ctrl
### hack hack hack...
- name: cook kubernetes secrets
@@ -32,6 +32,6 @@
- import_playbook: ../common/kubernetes-cluster-cleanup.yml
- name: install addons
- hosts: _kubernetes_primary_master_
+ hosts: _kubernetes_primary_controlplane_node_
roles:
- role: kubernetes/addons/metrics-server
diff --git a/inventory/group_vars/chaos-at-home/network.yml b/inventory/group_vars/chaos-at-home/network.yml
index 2957a24a..46564977 100644
--- a/inventory/group_vars/chaos-at-home/network.yml
+++ b/inventory/group_vars/chaos-at-home/network.yml
@@ -68,7 +68,7 @@ network_zones:
ch-http-proxy: 8
ch-imap-proxy: 9
ch-vpn: 10
- ch-k8s-master: 20
+ ch-k8s-ctrl: 20
ch-jump: 22
ch-gw-lan: 28
ch-iot: 30
diff --git a/inventory/group_vars/k8s-chtest/vars.yml b/inventory/group_vars/k8s-chtest/vars.yml
index e01b996d..66824314 100644
--- a/inventory/group_vars/k8s-chtest/vars.yml
+++ b/inventory/group_vars/k8s-chtest/vars.yml
@@ -1,15 +1,15 @@
---
-kubernetes_version: 1.23.1
+kubernetes_version: 1.23.6
kubernetes_container_runtime: containerd
kubernetes_network_plugin: kube-router
kubernetes_network_plugin_version: 1.4.0
-kubernetes_network_plugin_replaces_kube_proxy: true
+kubernetes_network_plugin_replaces_kube_proxy: yes
kubernetes:
cluster_name: chtest
- dedicated_master: True
+ dedicated_controlplane_nodes: yes
api_extra_sans:
- 192.168.32.20
diff --git a/inventory/group_vars/k8s-emc/vars.yml b/inventory/group_vars/k8s-emc/vars.yml
index c13e610c..b2a8fe39 100644
--- a/inventory/group_vars/k8s-emc/vars.yml
+++ b/inventory/group_vars/k8s-emc/vars.yml
@@ -6,10 +6,10 @@ kubernetes_network_plugin: kubeguard
kubernetes:
cluster_name: emc
- dedicated_master: False
+ dedicated_controlplane_nodes: yes
api_extra_sans:
- 178.63.180.137
- - emc-master.elev8.at
+ - emc-ctrl.elev8.at
pod_ip_range: 172.18.0.0/16
pod_ip_range_size: 24
@@ -37,7 +37,7 @@ kubeguard:
emc-dist0: 110
ele-dione: 111
ele-helene: 112
- emc-master: 127
+ emc-ctrl: 127
direct_net_zones:
encoder:
diff --git a/inventory/group_vars/vmhost-sk-2019vm/vars.yml b/inventory/group_vars/vmhost-sk-2019vm/vars.yml
index 221fa581..41f8b9db 100644
--- a/inventory/group_vars/vmhost-sk-2019vm/vars.yml
+++ b/inventory/group_vars/vmhost-sk-2019vm/vars.yml
@@ -11,8 +11,7 @@ vm_host:
prefix: 192.168.250.0/24
offsets:
sk-torrent: 136
- emc-master: 137
-# lw-master: 137
+ emc-ctrl: 137
ele-gwhetzner: 138
sgg-icecast: 141
ch-mimas: 142
@@ -24,8 +23,7 @@ vm_host:
prefix: 178.63.180.136/29
offsets:
sk-torrent: 0
- emc-master: 1
-# lw-master: 1
+ emc-ctrl: 1
ele-gwhetzner: 2
sgg-icecast: 5
ch-mimas: 6
diff --git a/inventory/host_vars/ch-dione.yml b/inventory/host_vars/ch-dione.yml
index a3cbbe68..ef9d8657 100644
--- a/inventory/host_vars/ch-dione.yml
+++ b/inventory/host_vars/ch-dione.yml
@@ -48,11 +48,18 @@ blackmagic_desktopvideo_version: 12.2.2a6
blackmagic_desktopvideo_include_gui: yes
-docker_pkg_provider: docker-com
-docker_storage:
+# docker_pkg_provider: docker-com
+# docker_storage:
+# type: lvm
+# vg: "{{ host_name }}"
+# lv: docker
+# size: 10G
+# fs: ext4
+
+containerd_storage:
type: lvm
vg: "{{ host_name }}"
- lv: docker
+ lv: containerd
size: 10G
fs: ext4
@@ -63,15 +70,15 @@ kubelet_storage:
size: 5G
fs: ext4
-kubernetes_version: 1.24.0
-kubernetes_container_runtime: docker
-kubernetes_standalone_max_pods: 42
-kubernetes_standalone_cni_variant: with-portmap
+# kubernetes_version: 1.24.0
+# kubernetes_container_runtime: docker
+# kubernetes_standalone_max_pods: 42
+# kubernetes_standalone_cni_variant: with-portmap
-rtmp_streamer_base_path: /srv/storage/streamer
-rtmp_streamer_inst_name: feed
-rtmp_streamer_nginx_image_version: 2022-04-29.23
-rtmp_streamer_decklink_card: "DeckLink 8K Pro (1)"
-rtmp_streamer_config:
- input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i']
+# rtmp_streamer_base_path: /srv/storage/streamer
+# rtmp_streamer_inst_name: feed
+# rtmp_streamer_nginx_image_version: 2022-04-29.23
+# rtmp_streamer_decklink_card: "DeckLink 8K Pro (1)"
+# rtmp_streamer_config:
+# input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i']
diff --git a/inventory/host_vars/ch-helene.yml b/inventory/host_vars/ch-helene.yml
index c524bf6a..816b38f8 100644
--- a/inventory/host_vars/ch-helene.yml
+++ b/inventory/host_vars/ch-helene.yml
@@ -48,11 +48,18 @@ blackmagic_desktopvideo_version: 12.2.2a6
blackmagic_desktopvideo_include_gui: yes
-docker_pkg_provider: docker-com
-docker_storage:
+# docker_pkg_provider: docker-com
+# docker_storage:
+# type: lvm
+# vg: "{{ host_name }}"
+# lv: docker
+# size: 10G
+# fs: ext4
+
+containerd_storage:
type: lvm
vg: "{{ host_name }}"
- lv: docker
+ lv: containerd
size: 10G
fs: ext4
@@ -63,15 +70,15 @@ kubelet_storage:
size: 5G
fs: ext4
-kubernetes_version: 1.24.0
-kubernetes_container_runtime: docker
-kubernetes_standalone_max_pods: 42
-kubernetes_standalone_cni_variant: with-portmap
+# kubernetes_version: 1.24.0
+# kubernetes_container_runtime: docker
+# kubernetes_standalone_max_pods: 42
+# kubernetes_standalone_cni_variant: with-portmap
-rtmp_streamer_base_path: /srv/storage/streamer
-rtmp_streamer_inst_name: feed
-rtmp_streamer_nginx_image_version: 2022-04-29.23
-rtmp_streamer_decklink_card: "DeckLink SDI 4K"
-rtmp_streamer_config:
- input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i']
+# rtmp_streamer_base_path: /srv/storage/streamer
+# rtmp_streamer_inst_name: feed
+# rtmp_streamer_nginx_image_version: 2022-04-29.23
+# rtmp_streamer_decklink_card: "DeckLink SDI 4K"
+# rtmp_streamer_config:
+# input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i']
diff --git a/inventory/host_vars/ch-k8s-master.yml b/inventory/host_vars/ch-k8s-ctrl.yml
index 63723000..63723000 100644
--- a/inventory/host_vars/ch-k8s-master.yml
+++ b/inventory/host_vars/ch-k8s-ctrl.yml
diff --git a/inventory/host_vars/emc-master.yml b/inventory/host_vars/emc-ctrl.yml
index 1ca011ec..1ca011ec 100644
--- a/inventory/host_vars/emc-master.yml
+++ b/inventory/host_vars/emc-ctrl.yml
diff --git a/inventory/hosts.ini b/inventory/hosts.ini
index 74e37925..581913b6 100644
--- a/inventory/hosts.ini
+++ b/inventory/hosts.ini
@@ -51,7 +51,7 @@ ch-calypso host_name=calypso
ch-thetys host_name=thetys
ch-dione host_name=dione
ch-helene host_name=helene
-ch-k8s-master host_name=k8s-master
+ch-k8s-ctrl host_name=k8s-ctrl
ch-hpws-maxi
ch-hpws-mini1
ch-alix1d
@@ -246,7 +246,7 @@ host_domain=elev8.at
env_group=dan
[emc]
-emc-master
+emc-ctrl
[emc:children]
emc-dist
@@ -342,7 +342,7 @@ ch-iot
ch-vpn
ch-mon
ch-omd
-ch-k8s-master
+ch-k8s-ctrl
ch-installsmb
[vmhost-ch-prometheus]
ch-prometheus
@@ -364,7 +364,7 @@ sk-testvm
sk-torrent
ch-mimas
ele-gwhetzner
-emc-master
+emc-ctrl
sgg-icecast
[vmhost-sk-2019vm]
sk-2019vm
@@ -468,7 +468,7 @@ emc-dist
emc-xx
[hetzner]
-emc-master
+emc-ctrl
sk-testvm
sk-torrent
sgg-icecast
@@ -548,11 +548,11 @@ emc-dist
[k8s-emc-streamer:children]
emc-xx
-[k8s-emc-master]
-emc-master
+[k8s-emc-ctrl]
+emc-ctrl
[k8s-emc:children]
-k8s-emc-master
+k8s-emc-ctrl
k8s-emc-encoder
k8s-emc-distribution
k8s-emc-streamer
@@ -563,9 +563,9 @@ k8s-emc-streamer
ch-dione
ch-helene
-[k8s-chtest-master]
-ch-k8s-master
+[k8s-chtest-ctrl]
+ch-k8s-ctrl
[k8s-chtest:children]
-k8s-chtest-master
+k8s-chtest-ctrl
k8s-chtest-encoder
diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2
deleted file mode 100644
index 1e3789bb..00000000
--- a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2
+++ /dev/null
@@ -1,156 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:aggregated-metrics-reader
- labels:
- rbac.authorization.k8s.io/aggregate-to-view: "true"
- rbac.authorization.k8s.io/aggregate-to-edit: "true"
- rbac.authorization.k8s.io/aggregate-to-admin: "true"
-rules:
-- apiGroups: ["metrics.k8s.io"]
- resources: ["pods", "nodes"]
- verbs: ["get", "list", "watch"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: metrics-server:system:auth-delegator
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:auth-delegator
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: metrics-server-auth-reader
- namespace: kube-system
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: extension-apiserver-authentication-reader
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
----
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
- name: v1beta1.metrics.k8s.io
-spec:
- service:
- name: metrics-server
- namespace: kube-system
- group: metrics.k8s.io
- version: v1beta1
- insecureSkipTLSVerify: true
- groupPriorityMinimum: 100
- versionPriority: 100
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: metrics-server
- namespace: kube-system
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: metrics-server
- namespace: kube-system
- labels:
- k8s-app: metrics-server
-spec:
- selector:
- matchLabels:
- k8s-app: metrics-server
- template:
- metadata:
- name: metrics-server
- labels:
- k8s-app: metrics-server
- spec:
- serviceAccountName: metrics-server
- volumes:
- # mount in tmp so we can safely use from-scratch images and/or read-only containers
- - name: tmp-dir
- emptyDir: {}
- containers:
- - name: metrics-server
- image: k8s.gcr.io/metrics-server-amd64:v0.3.6
- imagePullPolicy: IfNotPresent
- args:
- - --cert-dir=/tmp
- - --secure-port=4443
- - --kubelet-insecure-tls
- - --kubelet-preferred-address-types=InternalIP,ExternalIP
- ports:
- - name: main-port
- containerPort: 4443
- protocol: TCP
- securityContext:
- readOnlyRootFilesystem: true
- runAsNonRoot: true
- runAsUser: 1000
- volumeMounts:
- - name: tmp-dir
- mountPath: /tmp
- nodeSelector:
- kubernetes.io/os: linux
- kubernetes.io/arch: "amd64"
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
----
-apiVersion: v1
-kind: Service
-metadata:
- name: metrics-server
- namespace: kube-system
- labels:
- kubernetes.io/name: "Metrics-server"
- kubernetes.io/cluster-service: "true"
-spec:
- selector:
- k8s-app: metrics-server
- ports:
- - port: 443
- protocol: TCP
- targetPort: main-port
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:metrics-server
-rules:
-- apiGroups:
- - ""
- resources:
- - pods
- - nodes
- - nodes/stats
- - namespaces
- - configmaps
- verbs:
- - get
- - list
- - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: system:metrics-server
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:metrics-server
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2
deleted file mode 100644
index fc8d287b..00000000
--- a/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2
+++ /dev/null
@@ -1,155 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:aggregated-metrics-reader
- labels:
- rbac.authorization.k8s.io/aggregate-to-view: "true"
- rbac.authorization.k8s.io/aggregate-to-edit: "true"
- rbac.authorization.k8s.io/aggregate-to-admin: "true"
-rules:
-- apiGroups: ["metrics.k8s.io"]
- resources: ["pods", "nodes"]
- verbs: ["get", "list", "watch"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: metrics-server:system:auth-delegator
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:auth-delegator
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: metrics-server-auth-reader
- namespace: kube-system
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: extension-apiserver-authentication-reader
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
----
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
- name: v1beta1.metrics.k8s.io
-spec:
- service:
- name: metrics-server
- namespace: kube-system
- group: metrics.k8s.io
- version: v1beta1
- insecureSkipTLSVerify: true
- groupPriorityMinimum: 100
- versionPriority: 100
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: metrics-server
- namespace: kube-system
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: metrics-server
- namespace: kube-system
- labels:
- k8s-app: metrics-server
-spec:
- selector:
- matchLabels:
- k8s-app: metrics-server
- template:
- metadata:
- name: metrics-server
- labels:
- k8s-app: metrics-server
- spec:
- serviceAccountName: metrics-server
- volumes:
- # mount in tmp so we can safely use from-scratch images and/or read-only containers
- - name: tmp-dir
- emptyDir: {}
- containers:
- - name: metrics-server
- image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7
- imagePullPolicy: IfNotPresent
- args:
- - --cert-dir=/tmp
- - --secure-port=4443
- - --kubelet-insecure-tls
- - --kubelet-preferred-address-types=InternalIP,ExternalIP
- ports:
- - name: main-port
- containerPort: 4443
- protocol: TCP
- securityContext:
- readOnlyRootFilesystem: true
- runAsNonRoot: true
- runAsUser: 1000
- volumeMounts:
- - name: tmp-dir
- mountPath: /tmp
- nodeSelector:
- kubernetes.io/os: linux
- tolerations:
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
----
-apiVersion: v1
-kind: Service
-metadata:
- name: metrics-server
- namespace: kube-system
- labels:
- kubernetes.io/name: "Metrics-server"
- kubernetes.io/cluster-service: "true"
-spec:
- selector:
- k8s-app: metrics-server
- ports:
- - port: 443
- protocol: TCP
- targetPort: main-port
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:metrics-server
-rules:
-- apiGroups:
- - ""
- resources:
- - pods
- - nodes
- - nodes/stats
- - namespaces
- - configmaps
- verbs:
- - get
- - list
- - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: system:metrics-server
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:metrics-server
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
index 2e0eaf5d..19118b2e 100644
--- a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
+++ b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
@@ -16,7 +16,7 @@ defaults
option dontlog-normal
frontend kube_api
-{% if '_kubernetes_masters_' in group_names %}
+{% if '_kubernetes_controlplane_nodes_' in group_names %}
bind *:6443
{% else %}
bind 127.0.0.1:6443
@@ -25,7 +25,7 @@ frontend kube_api
default_backend kube_api
backend kube_api
-{% if '_kubernetes_masters_' in group_names %}
+{% if '_kubernetes_controlplane_nodes_' in group_names %}
balance first
{% else %}
balance roundrobin
@@ -36,6 +36,6 @@ backend kube_api
default-server inter 5s fall 3 rise 2
timeout connect 5s
timeout server 3h
-{% for master in groups['_kubernetes_masters_'] %}
- server {{ master }} {{ hostvars[master].kubernetes_overlay_node_ip | default(hostvars[master].ansible_default_ipv4.address) }}:6442 {% if master == inventory_hostname %}id 1{% endif %} check check-ssl verify none
+{% for node in groups['_kubernetes_controlplane_nodes_'] %}
+ server {{ node }} {{ hostvars[node].kubernetes_overlay_node_ip | default(hostvars[node].ansible_default_ipv4.address) }}:6442 {% if node == inventory_hostname %}id 1{% endif %} check check-ssl verify none
{% endfor %}
diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml
index 04df760f..d5bd378e 100644
--- a/roles/kubernetes/kubeadm/master/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml
@@ -12,48 +12,47 @@
mode: 0600
-- name: install primary master
- include_tasks: primary-master.yml
- when: "'_kubernetes_primary_master_' in group_names"
+- name: install primary control-plane node
+ include_tasks: primary.yml
+ when: "'_kubernetes_primary_controlplane_node_' in group_names"
-- name: install secondary masters
- include_tasks: secondary-masters.yml
- when: "'_kubernetes_primary_master_' not in group_names"
+- name: install secondary control-plane nodes
+ include_tasks: secondary.yml
+ when: "'_kubernetes_primary_controlplane_node_' not in group_names"
-- name: check if master is tainted (1/2)
+- name: check if control-plane node is tainted (1/2)
command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
check_mode: no
register: kubectl_get_node
changed_when: False
-- name: check if master is tainted (2/2)
+- name: check if control-plane node is tainted (2/2)
set_fact:
kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
-- name: remove taint from master/control-plane node
- when: not kubernetes.dedicated_master
+- name: remove taint from control-plane node
+ when: not kubernetes.dedicated_controlplane_nodes
block:
- - name: remove master taint from node
- when: "'node-role.kubernetes.io/master' in kube_node_taints"
- command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
-
- name: remove control-plane taint from node
when: "'node-role.kubernetes.io/control-plane' in kube_node_taints"
command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane-"
-- name: add taint from master/control-plane node
- when: kubernetes.dedicated_master
+ - name: remove deprecated master taint from node
+ when: "'node-role.kubernetes.io/master' in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
+
+- name: add taint to control-plane node
+ when: kubernetes.dedicated_controlplane_nodes
block:
- - name: add master taint from node
+ - name: add control-plane taint to node
+ when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule"
+
+ - name: add deprecated master taint to node
when: "'node-role.kubernetes.io/master' not in kube_node_taints"
command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
- ## TODO: enable this once all needed addons and workloads have tolerations set accordingly
- # - name: add control-plane taint from node
- # when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints"
- # command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule"
-
- name: prepare kubectl (1/2)
file:
name: /root/.kube
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
index 0a216414..0a216414 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
index a572ca89..a572ca89 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_none.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
index bf1a16d5..bf1a16d5 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_none.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
index 6fb63d09..22a5af42 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
@@ -25,9 +25,9 @@
# check_mode: no
# register: kubeadm_token_generate
- - name: initialize kubernetes master and store log
+ - name: initialize kubernetes primary control-plane node and store log
block:
- - name: initialize kubernetes master
+ - name: initialize kubernetes primary control-plane node
command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
# command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
args:
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml
index 4759b7fd..a2dbe081 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml
@@ -1,7 +1,7 @@
---
-- name: fetch secrets needed for secondary master
+- name: fetch secrets needed for secondary control-plane node
run_once: true
- delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+ delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}"
block:
- name: fetch list of current nodes
@@ -15,7 +15,7 @@
kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
- name: upload certs
- when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0"
+ when: "groups['_kubernetes_controlplane_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
command: kubeadm init phase upload-certs --upload-certs
check_mode: no
register: kubeadm_upload_certs
@@ -25,9 +25,9 @@
set_fact:
kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
-- name: join kubernetes secondary master node and store log
+- name: join kubernetes secondary control-plane node and store log
block:
- - name: join kubernetes secondary master node
+ - name: join kubernetes secondary control-plane node
throttle: 1
command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
args:
@@ -49,7 +49,7 @@
dest: /etc/kubernetes/kubeadm-join.errors
# TODO: acutally check if node has registered
-- name: give the new master(s) a moment to register
+- name: give the new control-plane node(s) a moment to register
when: kubeadm_join is changed
pause: # noqa 503
seconds: 5
diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2
index 345c9bf9..345c9bf9 100644
--- a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2
index 2fa98ed6..2fa98ed6 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2
index a2660db2..a2660db2 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2
index 382164cb..382164cb 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2
index 382164cb..382164cb 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
index e343f4a7..e343f4a7 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
index ec30d670..ec30d670 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
index d536d5a7..d536d5a7 100644
--- a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
diff --git a/roles/kubernetes/kubeadm/prune/tasks/main.yml b/roles/kubernetes/kubeadm/prune/tasks/main.yml
index 71ed0d04..45020963 100644
--- a/roles/kubernetes/kubeadm/prune/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/prune/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: remove nodes from api server
run_once: true
- delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+ delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}"
loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
command: "kubectl delete node {{ item }}"
diff --git a/roles/kubernetes/kubeadm/upgrade b/roles/kubernetes/kubeadm/upgrade
index c2f97d40..2cfa18cd 100644
--- a/roles/kubernetes/kubeadm/upgrade
+++ b/roles/kubernetes/kubeadm/upgrade
@@ -1,8 +1,8 @@
Cluster Upgrades:
=================
-primary master:
----------------
+primary control-plane node:
+---------------------------
VERSION=1.23.1
@@ -26,8 +26,8 @@ apt-get update && apt-get install -y "kubelet=$VERSION-00" "kubectl=$VERSION-00"
kubectl uncordon $(hostname)
-secondary master:
------------------
+secondary control-plane node:
+-----------------------------
VERSION=1.23.1
@@ -55,7 +55,7 @@ apt-get update
sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubeadm.pref
apt-get install -y "kubeadm=$VERSION-00"
-@primary master: kubectl drain <node> --ignore-daemonsets --delete-emptydir-data
+@primary control-plane node: kubectl drain <node> --ignore-daemonsets --delete-emptydir-data
kubeadm upgrade node
sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubelet.pref
@@ -64,4 +64,4 @@ apt-get update && apt-get install -y kubelet="$VERSION-00" "kubectl=$VERSION-00"
// security updates + reboot ?
-@primary master: kubectl uncordon <node>
+@primary control-plane node: kubectl uncordon <node>
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/worker/tasks/main.yml
index 13937bcf..eabb7a1f 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/worker/tasks/main.yml
@@ -1,7 +1,7 @@
---
-- name: join kubernetes node and store log
+- name: join kubernetes worker node and store log
block:
- - name: join kubernetes node
+ - name: join kubernetes worker node
command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
args:
creates: /etc/kubernetes/kubelet.conf