author     Christian Pointner <equinox@spreadspace.org>  2022-05-08 02:19:02 +0200
committer  Christian Pointner <equinox@spreadspace.org>  2022-05-08 02:19:02 +0200
commit     382f294e9b1dbcc2cb298d6a0cc80234dffcab82 (patch)
tree       c4339e341070ef5b2012fd8a9cced42217decd62
parent     prepare ch-dione and ch-helene for chtest k8s cluster (diff)
parent     kubernetes/kubeadm: fix kubeguard network plugin (diff)
Merge branch 'topic/k8s-1.24'
41 files changed, 333 insertions(+), 411 deletions(-)
diff --git a/chaos-at-home/k8s-chtest.yml b/chaos-at-home/k8s-chtest.yml
index 5f7c830e..01ba7db8 100644
--- a/chaos-at-home/k8s-chtest.yml
+++ b/chaos-at-home/k8s-chtest.yml
@@ -12,8 +12,8 @@
   vars:
     kubernetes_cluster_layout:
       nodes_group: k8s-chtest
-      masters:
-      - ch-k8s-master
+      controlplane_nodes:
+      - ch-k8s-ctrl
 
 ### hack hack hack...
 - name: cook kubernetes secrets
@@ -30,6 +30,6 @@
 - import_playbook: ../common/kubernetes-cluster-cleanup.yml
 
 - name: install addons
-  hosts: _kubernetes_primary_master_
+  hosts: _kubernetes_primary_controlplane_node_
   roles:
   - role: kubernetes/addons/metrics-server
diff --git a/common/kubernetes-cluster-cleanup.yml b/common/kubernetes-cluster-cleanup.yml
index 5647e3d6..87e59d31 100644
--- a/common/kubernetes-cluster-cleanup.yml
+++ b/common/kubernetes-cluster-cleanup.yml
@@ -1,6 +1,6 @@
 ---
 - name: check for nodes to be removed
-  hosts: _kubernetes_primary_master_
+  hosts: _kubernetes_primary_controlplane_node_
   tasks:
   - name: fetch list of current nodes
     command: kubectl get nodes -o name
diff --git a/common/kubernetes-cluster-layout.yml b/common/kubernetes-cluster-layout.yml
index bd972a6d..31751a8d 100644
--- a/common/kubernetes-cluster-layout.yml
+++ b/common/kubernetes-cluster-layout.yml
@@ -5,20 +5,20 @@
   gather_facts: no
   run_once: yes
   tasks:
-  - name: sanity check - fail if masters are not included in nodes
+  - name: sanity check - fail if control-plane nodes are not included in node group
     assert:
-      msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (master and non-master)"
-      that: kubernetes_cluster_layout.masters | difference(ansible_play_hosts_all) | length == 0
+      msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (control-plane and worker)"
+      that: kubernetes_cluster_layout.controlplane_nodes | difference(ansible_play_hosts_all) | length == 0
 
-  - name: sanity check - fail if primary master is not in masters
+  - name: sanity check - fail if primary control-group node is not in control-group node list
     assert:
-      msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
-      that: kubernetes_cluster_layout.primary_master is undefined or kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters
+      msg: "kubernetes_cluster_layout.controlplane_nodes must include kubernetes_cluster_layout.primary_controlplane_node"
+      that: kubernetes_cluster_layout.primary_controlplane_node is undefined or kubernetes_cluster_layout.primary_controlplane_node in kubernetes_cluster_layout.controlplane_nodes
 
-  - name: sanity check - fail on multiple masters if no primary master is configured
+  - name: sanity check - fail on multiple control-plane nodes but no primary is configured
     assert:
-      msg: "for multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
-      that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
+      msg: "for multiple control-plane nodes to work you need to define kubernetes_cluster_layout.primary_controlplane_node"
+      that: (kubernetes_cluster_layout.controlplane_nodes | length) == 1 or kubernetes_cluster_layout.primary_controlplane_node is defined
 
   - name: create group for all kubernetes nodes
     loop: "{{ ansible_play_hosts_all }}"
@@ -28,19 +28,19 @@
       group: _kubernetes_nodes_
     changed_when: False
 
-  - name: create group for kubernetes master nodes
-    loop: "{{ kubernetes_cluster_layout.masters }}"
+  - name: create group for kubernetes control-plane nodes
+    loop: "{{ kubernetes_cluster_layout.controlplane_nodes }}"
     add_host:
       name: "{{ item }}"
       inventory_dir: "{{ hostvars[item].inventory_dir }}"
-      group: _kubernetes_masters_
+      group: _kubernetes_controlplane_nodes_
     changed_when: False
 
-  - name: create group for kubernetes primary master
+  - name: create group for kubernetes primary control-plane node
     vars:
-      item: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
+      item: "{{ kubernetes_cluster_layout.primary_controlplane_node | default(kubernetes_cluster_layout.controlplane_nodes[0]) }}"
     add_host:
       name: "{{ item }}"
       inventory_dir: "{{ hostvars[item].inventory_dir }}"
-      group: _kubernetes_primary_master_
+      group: _kubernetes_primary_controlplane_node_
     changed_when: False
diff --git a/common/kubernetes-cluster.yml b/common/kubernetes-cluster.yml
index 4ee91dd3..6958db15 100644
--- a/common/kubernetes-cluster.yml
+++ b/common/kubernetes-cluster.yml
@@ -40,19 +40,19 @@
   - role: kubernetes/base
   - role: kubernetes/kubeadm/base
 
-- name: configure primary kubernetes master
-  hosts: _kubernetes_primary_master_
+- name: configure primary kubernetes control-plane node
+  hosts: _kubernetes_primary_controlplane_node_
   roles:
-  - role: kubernetes/kubeadm/master
+  - role: kubernetes/kubeadm/control-plane
 
-- name: configure secondary kubernetes masters
-  hosts: _kubernetes_masters_:!_kubernetes_primary_master_
+- name: configure secondary kubernetes control-plane nodes
+  hosts: _kubernetes_controlplane_nodes_:!_kubernetes_primary_controlplane_node_
   roles:
-  - role: kubernetes/kubeadm/master
+  - role: kubernetes/kubeadm/control-plane
 
-- name: configure kubernetes non-master nodes
-  hosts: _kubernetes_nodes_:!_kubernetes_masters_
+- name: configure kubernetes worker nodes
+  hosts: _kubernetes_nodes_:!_kubernetes_controlplane_nodes_
   roles:
-  - role: kubernetes/kubeadm/node
+  - role: kubernetes/kubeadm/worker
 
 ### TODO: add node labels (ie. for ingress daeomnset)
diff --git a/dan/emc-master.yml b/dan/emc-ctrl.yml
index edfc0ffe..285faaaa 100644
--- a/dan/emc-master.yml
+++ b/dan/emc-ctrl.yml
@@ -1,6 +1,6 @@
 ---
 - name: Basic Setup
-  hosts: emc-master
+  hosts: emc-ctrl
   roles:
   - role: apt-repo/base
   - role: core/base
diff --git a/dan/k8s-emc.yml b/dan/k8s-emc.yml
index 468919ef..75e81c8b 100644
--- a/dan/k8s-emc.yml
+++ b/dan/k8s-emc.yml
@@ -14,8 +14,8 @@
   vars:
     kubernetes_cluster_layout:
       nodes_group: k8s-emc
-      masters:
-      - emc-master
+      controlplane_nodes:
+      - emc-ctrl
 
 ### hack hack hack...
 - name: cook kubernetes secrets
@@ -32,6 +32,6 @@
 - import_playbook: ../common/kubernetes-cluster-cleanup.yml
 
 - name: install addons
-  hosts: _kubernetes_primary_master_
+  hosts: _kubernetes_primary_controlplane_node_
   roles:
   - role: kubernetes/addons/metrics-server
diff --git a/inventory/group_vars/chaos-at-home/network.yml b/inventory/group_vars/chaos-at-home/network.yml
index 2957a24a..46564977 100644
--- a/inventory/group_vars/chaos-at-home/network.yml
+++ b/inventory/group_vars/chaos-at-home/network.yml
@@ -68,7 +68,7 @@ network_zones:
       ch-http-proxy: 8
       ch-imap-proxy: 9
       ch-vpn: 10
-      ch-k8s-master: 20
+      ch-k8s-ctrl: 20
       ch-jump: 22
       ch-gw-lan: 28
       ch-iot: 30
diff --git a/inventory/group_vars/k8s-chtest/vars.yml b/inventory/group_vars/k8s-chtest/vars.yml
index e01b996d..ac1a3991 100644
--- a/inventory/group_vars/k8s-chtest/vars.yml
+++ b/inventory/group_vars/k8s-chtest/vars.yml
@@ -1,15 +1,15 @@
 ---
-kubernetes_version: 1.23.1
+kubernetes_version: 1.24.0
 kubernetes_container_runtime: containerd
 
 kubernetes_network_plugin: kube-router
 kubernetes_network_plugin_version: 1.4.0
-kubernetes_network_plugin_replaces_kube_proxy: true
+kubernetes_network_plugin_replaces_kube_proxy: yes
 
 
 kubernetes:
   cluster_name: chtest
-  dedicated_master: True
+  dedicated_controlplane_nodes: yes
 
   api_extra_sans:
   - 192.168.32.20
@@ -21,4 +21,4 @@ kubernetes:
 kubernetes_secrets:
   encryption_config_keys: "{{ vault_kubernetes_encryption_config_keys }}"
 
-kubernetes_metrics_server_version: 0.5.2
+kubernetes_metrics_server_version: 0.6.1
diff --git a/inventory/group_vars/k8s-emc/vars.yml b/inventory/group_vars/k8s-emc/vars.yml
index c13e610c..be1c4818 100644
--- a/inventory/group_vars/k8s-emc/vars.yml
+++ b/inventory/group_vars/k8s-emc/vars.yml
@@ -1,15 +1,15 @@
 ---
-kubernetes_version: 1.23.1
+kubernetes_version: 1.24.0
 kubernetes_container_runtime: containerd
 
 kubernetes_network_plugin: kubeguard
 
 kubernetes:
   cluster_name: emc
-  dedicated_master: False
+  dedicated_controlplane_nodes: yes
 
   api_extra_sans:
   - 178.63.180.137
-  - emc-master.elev8.at
+  - emc-ctrl.elev8.at
 
   pod_ip_range: 172.18.0.0/16
   pod_ip_range_size: 24
@@ -37,7 +37,7 @@ kubeguard:
     emc-dist0: 110
     ele-dione: 111
     ele-helene: 112
-    emc-master: 127
+    emc-ctrl: 127
 
   direct_net_zones:
     encoder:
@@ -48,4 +48,4 @@ kubeguard:
 
 kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
 
-kubernetes_metrics_server_version: 0.5.2
+kubernetes_metrics_server_version: 0.6.1
diff --git a/inventory/group_vars/vmhost-sk-2019vm/vars.yml b/inventory/group_vars/vmhost-sk-2019vm/vars.yml
index 221fa581..41f8b9db 100644
--- a/inventory/group_vars/vmhost-sk-2019vm/vars.yml
+++ b/inventory/group_vars/vmhost-sk-2019vm/vars.yml
@@ -11,8 +11,7 @@ vm_host:
       prefix: 192.168.250.0/24
       offsets:
         sk-torrent: 136
-        emc-master: 137
-#        lw-master: 137
+        emc-ctrl: 137
         ele-gwhetzner: 138
         sgg-icecast: 141
         ch-mimas: 142
@@ -24,8 +23,7 @@ vm_host:
       prefix: 178.63.180.136/29
       offsets:
         sk-torrent: 0
-        emc-master: 1
-#        lw-master: 1
+        emc-ctrl: 1
         ele-gwhetzner: 2
         sgg-icecast: 5
         ch-mimas: 6
diff --git a/inventory/host_vars/ch-dione.yml b/inventory/host_vars/ch-dione.yml
index a3cbbe68..ef9d8657 100644
--- a/inventory/host_vars/ch-dione.yml
+++ b/inventory/host_vars/ch-dione.yml
@@ -48,11 +48,18 @@ blackmagic_desktopvideo_version: 12.2.2a6
 blackmagic_desktopvideo_include_gui: yes
 
-docker_pkg_provider: docker-com
-docker_storage:
+# docker_pkg_provider: docker-com
+# docker_storage:
+#   type: lvm
+#   vg: "{{ host_name }}"
+#   lv: docker
+#   size: 10G
+#   fs: ext4
+
+containerd_storage:
   type: lvm
   vg: "{{ host_name }}"
-  lv: docker
+  lv: containerd
   size: 10G
   fs: ext4
 
@@ -63,15 +70,15 @@ kubelet_storage:
   size: 5G
   fs: ext4
 
-kubernetes_version: 1.24.0
-kubernetes_container_runtime: docker
-kubernetes_standalone_max_pods: 42
-kubernetes_standalone_cni_variant: with-portmap
+# kubernetes_version: 1.24.0
+# kubernetes_container_runtime: docker
+# kubernetes_standalone_max_pods: 42
+# kubernetes_standalone_cni_variant: with-portmap
 
-rtmp_streamer_base_path: /srv/storage/streamer
-rtmp_streamer_inst_name: feed
-rtmp_streamer_nginx_image_version: 2022-04-29.23
-rtmp_streamer_decklink_card: "DeckLink 8K Pro (1)"
-rtmp_streamer_config:
-  input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i']
+# rtmp_streamer_base_path: /srv/storage/streamer
+# rtmp_streamer_inst_name: feed
+# rtmp_streamer_nginx_image_version: 2022-04-29.23
+# rtmp_streamer_decklink_card: "DeckLink 8K Pro (1)"
+# rtmp_streamer_config:
+#   input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i']
diff --git a/inventory/host_vars/ch-helene.yml b/inventory/host_vars/ch-helene.yml
index c524bf6a..816b38f8 100644
--- a/inventory/host_vars/ch-helene.yml
+++ b/inventory/host_vars/ch-helene.yml
@@ -48,11 +48,18 @@ blackmagic_desktopvideo_version: 12.2.2a6
 blackmagic_desktopvideo_include_gui: yes
 
-docker_pkg_provider: docker-com
-docker_storage:
+# docker_pkg_provider: docker-com
+# docker_storage:
+#   type: lvm
+#   vg: "{{ host_name }}"
+#   lv: docker
+#   size: 10G
+#   fs: ext4
+
+containerd_storage:
   type: lvm
   vg: "{{ host_name }}"
-  lv: docker
+  lv: containerd
   size: 10G
   fs: ext4
 
@@ -63,15 +70,15 @@ kubelet_storage:
   size: 5G
   fs: ext4
 
-kubernetes_version: 1.24.0
-kubernetes_container_runtime: docker
-kubernetes_standalone_max_pods: 42
-kubernetes_standalone_cni_variant: with-portmap
+# kubernetes_version: 1.24.0
+# kubernetes_container_runtime: docker
+# kubernetes_standalone_max_pods: 42
+# kubernetes_standalone_cni_variant: with-portmap
 
-rtmp_streamer_base_path: /srv/storage/streamer
-rtmp_streamer_inst_name: feed
-rtmp_streamer_nginx_image_version: 2022-04-29.23
-rtmp_streamer_decklink_card: "DeckLink SDI 4K"
-rtmp_streamer_config:
-  input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i']
+# rtmp_streamer_base_path: /srv/storage/streamer
+# rtmp_streamer_inst_name: feed
+# rtmp_streamer_nginx_image_version: 2022-04-29.23
+# rtmp_streamer_decklink_card: "DeckLink SDI 4K"
+# rtmp_streamer_config:
+#   input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i']
diff --git a/inventory/host_vars/ch-k8s-master.yml b/inventory/host_vars/ch-k8s-ctrl.yml
index 63723000..63723000 100644
--- a/inventory/host_vars/ch-k8s-master.yml
+++ b/inventory/host_vars/ch-k8s-ctrl.yml
diff --git a/inventory/host_vars/emc-master.yml b/inventory/host_vars/emc-ctrl.yml
index 1ca011ec..1ca011ec 100644
--- a/inventory/host_vars/emc-master.yml
+++ b/inventory/host_vars/emc-ctrl.yml
diff --git a/inventory/hosts.ini b/inventory/hosts.ini
index 74e37925..581913b6 100644
--- a/inventory/hosts.ini
+++ b/inventory/hosts.ini
@@ -51,7 +51,7 @@ ch-calypso host_name=calypso
 ch-thetys host_name=thetys
 ch-dione host_name=dione
 ch-helene host_name=helene
-ch-k8s-master host_name=k8s-master
+ch-k8s-ctrl host_name=k8s-ctrl
 ch-hpws-maxi
 ch-hpws-mini1
 ch-alix1d
@@ -246,7 +246,7 @@ host_domain=elev8.at env_group=dan
 
 [emc]
-emc-master
+emc-ctrl
 
 [emc:children]
 emc-dist
@@ -342,7 +342,7 @@ ch-iot
 ch-vpn
 ch-mon
 ch-omd
-ch-k8s-master
+ch-k8s-ctrl
 ch-installsmb
 
 [vmhost-ch-prometheus]
 ch-prometheus
@@ -364,7 +364,7 @@ sk-testvm
 sk-torrent
 ch-mimas
 ele-gwhetzner
-emc-master
+emc-ctrl
 sgg-icecast
 
 [vmhost-sk-2019vm]
 sk-2019vm
@@ -468,7 +468,7 @@ emc-dist
 emc-xx
 
 [hetzner]
-emc-master
+emc-ctrl
 sk-testvm
 sk-torrent
 sgg-icecast
@@ -548,11 +548,11 @@ emc-dist
 
 [k8s-emc-streamer:children]
 emc-xx
 
-[k8s-emc-master]
-emc-master
+[k8s-emc-ctrl]
+emc-ctrl
 
 [k8s-emc:children]
-k8s-emc-master
+k8s-emc-ctrl
 k8s-emc-encoder
 k8s-emc-distribution
 k8s-emc-streamer
@@ -563,9 +563,9 @@ k8s-emc-streamer
 ch-dione
 ch-helene
 
-[k8s-chtest-master]
-ch-k8s-master
+[k8s-chtest-ctrl]
+ch-k8s-ctrl
 
 [k8s-chtest:children]
-k8s-chtest-master
+k8s-chtest-ctrl
 k8s-chtest-encoder
diff --git a/roles/kubernetes/addons/metrics-server/tasks/main.yml b/roles/kubernetes/addons/metrics-server/tasks/main.yml
index 5236e4e3..87c57346 100644
--- a/roles/kubernetes/addons/metrics-server/tasks/main.yml
+++ b/roles/kubernetes/addons/metrics-server/tasks/main.yml
@@ -9,8 +9,13 @@
     src: "components.{{ kubernetes_metrics_server_version }}.yml.j2"
     dest: /etc/kubernetes/addons/metrics-server/config.yml
 
-## TODO: move to server-side apply (GA since 1.22)
+- name: check if metrics-server is already installed
+  check_mode: no
+  command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/addons/metrics-server/config.yml
+  failed_when: false
+  changed_when: false
+  register: kube_metrics_server_diff_result
+
 - name: install metrics-server onto the cluster
+  when: kube_metrics_server_diff_result.rc != 0
   command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/addons/metrics-server/config.yml
-  register: kube_metrics_server_apply_result
-  changed_when: (kube_metrics_server_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2
deleted file mode 100644
index fc8d287b..00000000
--- a/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2
+++ /dev/null
@@ -1,155 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: system:aggregated-metrics-reader
-  labels:
-    rbac.authorization.k8s.io/aggregate-to-view: "true"
-    rbac.authorization.k8s.io/aggregate-to-edit: "true"
-    rbac.authorization.k8s.io/aggregate-to-admin: "true"
-rules:
-- apiGroups: ["metrics.k8s.io"]
-  resources: ["pods", "nodes"]
-  verbs: ["get", "list", "watch"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: metrics-server:system:auth-delegator
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:auth-delegator
-subjects:
-- kind: ServiceAccount
-  name: metrics-server
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: metrics-server-auth-reader
-  namespace: kube-system
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: extension-apiserver-authentication-reader
-subjects:
-- kind: ServiceAccount
-  name: metrics-server
-  namespace: kube-system
----
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
-  name: v1beta1.metrics.k8s.io
-spec:
-  service:
-    name: metrics-server
-    namespace: kube-system
-  group: metrics.k8s.io
-  version: v1beta1
-  insecureSkipTLSVerify: true
-  groupPriorityMinimum: 100
-  versionPriority: 100
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: metrics-server
-  namespace: kube-system
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: metrics-server
-  namespace: kube-system
-  labels:
-    k8s-app: metrics-server
-spec:
-  selector:
-    matchLabels:
-      k8s-app: metrics-server
-  template:
-    metadata:
-      name: metrics-server
-      labels:
-        k8s-app: metrics-server
-    spec:
-      serviceAccountName: metrics-server
-      volumes:
-      # mount in tmp so we can safely use from-scratch images and/or read-only containers
-      - name: tmp-dir
-        emptyDir: {}
-      containers:
-      - name: metrics-server
-        image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7
-        imagePullPolicy: IfNotPresent
-        args:
-        - --cert-dir=/tmp
-        - --secure-port=4443
-        - --kubelet-insecure-tls
-        - --kubelet-preferred-address-types=InternalIP,ExternalIP
-        ports:
-        - name: main-port
-          containerPort: 4443
-          protocol: TCP
-        securityContext:
-          readOnlyRootFilesystem: true
-          runAsNonRoot: true
-          runAsUser: 1000
-        volumeMounts:
-        - name: tmp-dir
-          mountPath: /tmp
-      nodeSelector:
-        kubernetes.io/os: linux
-      tolerations:
-      - effect: NoSchedule
-        key: node-role.kubernetes.io/master
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: metrics-server
-  namespace: kube-system
-  labels:
-    kubernetes.io/name: "Metrics-server"
-    kubernetes.io/cluster-service: "true"
-spec:
-  selector:
-    k8s-app: metrics-server
-  ports:
-  - port: 443
-    protocol: TCP
-    targetPort: main-port
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: system:metrics-server
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  - nodes
-  - nodes/stats
-  - namespaces
-  - configmaps
-  verbs:
-  - get
-  - list
-  - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: system:metrics-server
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:metrics-server
-subjects:
-- kind: ServiceAccount
-  name: metrics-server
-  namespace: kube-system
diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2
index 1e3789bb..7b427254 100644
--- a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2
+++ b/roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2
@@ -1,20 +1,75 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: system:aggregated-metrics-reader
   labels:
-    rbac.authorization.k8s.io/aggregate-to-view: "true"
-    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    k8s-app: metrics-server
     rbac.authorization.k8s.io/aggregate-to-admin: "true"
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+  name: system:aggregated-metrics-reader
 rules:
-- apiGroups: ["metrics.k8s.io"]
-  resources: ["pods", "nodes"]
-  verbs: ["get", "list", "watch"]
+- apiGroups:
+  - metrics.k8s.io
+  resources:
+  - pods
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: system:metrics-server
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - nodes/metrics
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server-auth-reader
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
+  labels:
+    k8s-app: metrics-server
   name: metrics-server:system:auth-delegator
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -26,131 +81,117 @@ subjects:
   namespace: kube-system
 ---
 apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
+kind: ClusterRoleBinding
 metadata:
-  name: metrics-server-auth-reader
-  namespace: kube-system
+  labels:
+    k8s-app: metrics-server
+  name: system:metrics-server
 roleRef:
   apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: extension-apiserver-authentication-reader
+  kind: ClusterRole
+  name: system:metrics-server
 subjects:
 - kind: ServiceAccount
   name: metrics-server
   namespace: kube-system
 ---
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
-  name: v1beta1.metrics.k8s.io
-spec:
-  service:
-    name: metrics-server
-    namespace: kube-system
-  group: metrics.k8s.io
-  version: v1beta1
-  insecureSkipTLSVerify: true
-  groupPriorityMinimum: 100
-  versionPriority: 100
----
 apiVersion: v1
-kind: ServiceAccount
+kind: Service
 metadata:
+  labels:
+    k8s-app: metrics-server
   name: metrics-server
   namespace: kube-system
+spec:
+  ports:
+  - name: https
+    port: 443
+    protocol: TCP
+    targetPort: https
+  selector:
+    k8s-app: metrics-server
 ---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: metrics-server
-  namespace: kube-system
   labels:
     k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
 spec:
   selector:
     matchLabels:
       k8s-app: metrics-server
+  strategy:
+    rollingUpdate:
+      maxUnavailable: 0
   template:
     metadata:
-      name: metrics-server
       labels:
         k8s-app: metrics-server
     spec:
-      serviceAccountName: metrics-server
-      volumes:
-      # mount in tmp so we can safely use from-scratch images and/or read-only containers
-      - name: tmp-dir
-        emptyDir: {}
       containers:
-      - name: metrics-server
-        image: k8s.gcr.io/metrics-server-amd64:v0.3.6
+      - args:
+        - --cert-dir=/tmp
+        - --secure-port=4443
+        - --kubelet-insecure-tls
+        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+        - --kubelet-use-node-status-port
+        - --metric-resolution=15s
+        image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
         imagePullPolicy: IfNotPresent
-        args:
-        - --cert-dir=/tmp
-        - --secure-port=4443
-        - --kubelet-insecure-tls
-        - --kubelet-preferred-address-types=InternalIP,ExternalIP
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /livez
+            port: https
+            scheme: HTTPS
+          periodSeconds: 10
+        name: metrics-server
         ports:
-        - name: main-port
-          containerPort: 4443
+        - containerPort: 4443
+          name: https
           protocol: TCP
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /readyz
+            port: https
+            scheme: HTTPS
+          initialDelaySeconds: 20
+          periodSeconds: 10
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
         securityContext:
+          allowPrivilegeEscalation: false
           readOnlyRootFilesystem: true
           runAsNonRoot: true
           runAsUser: 1000
         volumeMounts:
-        - name: tmp-dir
-          mountPath: /tmp
+        - mountPath: /tmp
+          name: tmp-dir
       nodeSelector:
         kubernetes.io/os: linux
-        kubernetes.io/arch: "amd64"
-      tolerations:
-      - effect: NoSchedule
-        key: node-role.kubernetes.io/master
+      priorityClassName: system-cluster-critical
+      serviceAccountName: metrics-server
+      volumes:
+      - emptyDir: {}
+        name: tmp-dir
 ---
-apiVersion: v1
-kind: Service
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
 metadata:
-  name: metrics-server
-  namespace: kube-system
   labels:
-    kubernetes.io/name: "Metrics-server"
-    kubernetes.io/cluster-service: "true"
-spec:
-  selector:
     k8s-app: metrics-server
-  ports:
-  - port: 443
-    protocol: TCP
-    targetPort: main-port
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: system:metrics-server
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  - nodes
-  - nodes/stats
-  - namespaces
-  - configmaps
-  verbs:
-  - get
-  - list
-  - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: system:metrics-server
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:metrics-server
-subjects:
-- kind: ServiceAccount
-  name: metrics-server
-  namespace: kube-system
+  name: v1beta1.metrics.k8s.io
+spec:
+  group: metrics.k8s.io
+  groupPriorityMinimum: 100
+  insecureSkipTLSVerify: true
+  service:
+    name: metrics-server
+    namespace: kube-system
+  version: v1beta1
+  versionPriority: 100
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
index c9598638..626395b7 100644
--- a/roles/kubernetes/base/tasks/cri_docker.yml
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -10,7 +10,7 @@
     path: /etc/systemd/system/kubelet.service.d/
     state: directory
 
-- name: install systemd snippet to make sure kubelet starts after docker
+- name: install systemd snippet to make sure kubelet starts after cri-dockerd
   copy:
     content: |
       [Unit]
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 4ff976a1..d2f7ef81 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
-- name: check if prometheus apt component of spreadspace repo is enabled
+- name: check if container apt component of spreadspace repo is enabled
   assert:
     msg: "please enable the 'container' component of spreadspace repo using 'spreadspace_apt_repo_components'"
    that:
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
index 40cee3b7..350ecdee 100644
--- a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
@@ -87,8 +87,8 @@
 
 - name: install cni config
   template:
-    src: net_kubeguard/cni.json.j2
-    dest: /etc/cni/net.d/kubeguard.conf
+    src: net_kubeguard/cni.conflist.j2
+    dest: /etc/cni/net.d/kubeguard.conflist
 
 - name: install packages needed for debugging kube-router
   when: kubernetes_network_plugin_variant == 'with-kube-router'
diff --git a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
index 2e0eaf5d..19118b2e 100644
--- a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
+++ b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
@@ -16,7 +16,7 @@ defaults
   option dontlog-normal
 
 frontend kube_api
-{% if '_kubernetes_masters_' in group_names %}
+{% if '_kubernetes_controlplane_nodes_' in group_names %}
   bind *:6443
 {% else %}
   bind 127.0.0.1:6443
@@ -25,7 +25,7 @@ frontend kube_api
   default_backend kube_api
 
 backend kube_api
-{% if '_kubernetes_masters_' in group_names %}
+{% if '_kubernetes_controlplane_nodes_' in group_names %}
   balance first
 {% else %}
   balance roundrobin
@@ -36,6 +36,6 @@ backend kube_api
   default-server inter 5s fall 3 rise 2
   timeout connect 5s
   timeout server 3h
-{% for master in groups['_kubernetes_masters_'] %}
-  server {{ master }} {{ hostvars[master].kubernetes_overlay_node_ip | default(hostvars[master].ansible_default_ipv4.address) }}:6442 {% if master == inventory_hostname %}id 1{% endif %} check check-ssl verify none
+{% for node in groups['_kubernetes_controlplane_nodes_'] %}
+  server {{ node }} {{ hostvars[node].kubernetes_overlay_node_ip | default(hostvars[node].ansible_default_ipv4.address) }}:6442 {% if node == inventory_hostname %}id 1{% endif %} check check-ssl verify none
 {% endfor %}
diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2
new file mode 100644
index 00000000..240d86ef
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2
@@ -0,0 +1,16 @@
+{
+  "cniVersion": "0.3.1",
+  "name": "kubeguard",
+  "plugins": [
+    {
+      "type": "bridge",
+      "bridge": "kubeguard-br0",
+      "isDefaultGateway": true,
+      "hairpinMode": true,
+      "ipam": {
+        "type": "host-local",
+        "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
+      }
+    }
+  ]
+}
diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2
deleted file mode 100644
index eb9e3d61..00000000
--- a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "cniVersion": "0.3.1",
-  "name": "kubeguard",
-  "type": "bridge",
-  "bridge": "kubeguard-br0",
-  "isDefaultGateway": true,
-  "hairpinMode": true,
-  "ipam": {
-    "type": "host-local",
-    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
-  }
-}
diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml
index 04df760f..d5bd378e 100644
--- a/roles/kubernetes/kubeadm/master/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml
@@ -12,48 +12,47 @@
     mode: 0600
 
-- name: install primary master
-  include_tasks: primary-master.yml
-  when: "'_kubernetes_primary_master_' in group_names"
+- name: install primary control-plane node
+  include_tasks: primary.yml
+  when: "'_kubernetes_primary_controlplane_node_' in group_names"
 
-- name: install secondary masters
-  include_tasks: secondary-masters.yml
-  when: "'_kubernetes_primary_master_' not in group_names"
+- name: install secondary control-plane nodes
+  include_tasks: secondary.yml
+  when: "'_kubernetes_primary_controlplane_node_' not in group_names"
 
-- name: check if master is tainted (1/2)
+- name: check if control-plane node is tainted (1/2)
   command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
   check_mode: no
   register: kubectl_get_node
   changed_when: False
 
-- name: check if master is tainted (2/2)
+- name: check if control-plane node is tainted (2/2)
   set_fact:
     kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
 
-- name: remove taint from master/control-plane node
-  when: not kubernetes.dedicated_master
+- name: remove taint from control-plane node
+  when: not kubernetes.dedicated_controlplane_nodes
   block:
-  - name: remove master taint from node
-    when: "'node-role.kubernetes.io/master' in kube_node_taints"
-    command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
-
   - name: remove control-plane taint from node
     when: "'node-role.kubernetes.io/control-plane' in kube_node_taints"
     command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane-"
 
-- name: add taint from master/control-plane node
-  when: kubernetes.dedicated_master
+  - name: remove deprecated master taint from node
+    when: "'node-role.kubernetes.io/master' in kube_node_taints"
+    command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
+
+- name: add taint from control-plane node
+  when: kubernetes.dedicated_controlplane_nodes
   block:
-  - name: add master taint from node
+  - name: add control-plane taint to node
+    when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints"
+    command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule"
+
+  - name: add deprecated master taint to node
     when: "'node-role.kubernetes.io/master' not in kube_node_taints"
     command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
 
-## TODO: enable this once all needed addons and workloads have tolerations set accordingly
-#  - name: add control-plane taint from node
-#    when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints"
-#    command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule"
-
 - name: prepare kubectl (1/2)
   file:
     name: /root/.kube
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
index 0a216414..4584e583 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
@@ -4,8 +4,13 @@
     src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2"
     dest: /etc/kubernetes/network-plugin.yml
 
-## TODO: move to server-side apply (GA since 1.22)
+- name: check if kube-router is already installed
+  check_mode: no
+  command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml
+  failed_when: false
+  changed_when: false
+  register: kube_router_diff_result
+
 - name: install kube-router on to the cluster
+  when: kube_router_diff_result.rc != 0
   command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
-  register: kube_router_apply_result
-  changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
index a572ca89..66dac49b 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
@@ -7,8 +7,13 @@
       src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2"
       dest: /etc/kubernetes/network-plugin.yml
 
-  ## TODO: move to server-side apply (GA since 1.22)
+  - name: check if kubeguard (kube-router) is already installed
+    check_mode: no
+    command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml
+    failed_when: false
+    changed_when: false
+    register: kubeguard_diff_result
+
   - name: install kubeguard (kube-router) on to the cluster
+    when: kubeguard_diff_result.rc != 0
     command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
-    register: kubeguard_apply_result
-    changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_none.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
index bf1a16d5..bf1a16d5 100644
--- a/roles/kubernetes/kubeadm/master/tasks/net_none.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
index 6fb63d09..65a6f7c8 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
@@ -4,7 +4,6 @@
     path: /etc/kubernetes/kubelet.conf
   register: kubeconfig_kubelet_stats
 
-## TODO: switch to kubeadm config version v1beta3 (available since 1.22)
 - name: generate kubeadm.config
   template:
     src: kubeadm.config.j2
@@ -25,11 +24,11 @@
 #    check_mode: no
 #    register: kubeadm_token_generate
 
-- name: initialize kubernetes master and store log
+- name: initialize kubernetes primary control-plane node and store log
   block:
-  - name: initialize kubernetes master
-    command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
-    # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+  - name: initialize kubernetes primary control-plane node
+    command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }} --skip-token-print"
+    # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
     args:
       creates: /etc/kubernetes/pki/ca.crt
     register: kubeadm_init
@@ -47,7 +46,7 @@
       content: "{{ kubeadm_init.stderr }}\n"
       dest: /etc/kubernetes/kubeadm-init.errors
 
-  - name: create bootstrap token for existing cluster
+  - name: create bootstrap token for new cluster
    command: kubeadm token create --ttl 42m
    check_mode: no
    register: kubeadm_token_generate
@@ -118,11 +117,16 @@
     src: node-local-dns.yml.j2
     dest: /etc/kubernetes/node-local-dns.yml
 
-## TODO: move to server-side apply (GA since 1.22)
-- name: install node-local dns cache
+- name: check if node-local dns cache is already installed
+  check_mode: no
+  command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/node-local-dns.yml
+  failed_when: false
+  changed_when: false
+  register: kube_node_local_dns_diff_result
+
+- name: install node-local dns cache
+  when: kube_node_local_dns_diff_result.rc != 0
   command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml
-  register: kube_node_local_dns_apply_result
-  changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
 
 
 ## Network Plugin
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml
index 4759b7fd..a2dbe081 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml
@@ -1,7 +1,7 @@
 ---
-- name: fetch secrets needed for secondary master
+- name: fetch secrets needed for secondary control-plane node
   run_once: true
-  delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+  delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}"
   block:
 
   - name: fetch list of current nodes
@@ -15,7 +15,7 @@
       kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
 
   - name: upload certs
-    when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0"
+    when: "groups['_kubernetes_controlplane_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
     command: kubeadm init phase upload-certs --upload-certs
     check_mode: no
     register: kubeadm_upload_certs
@@ -25,9 +25,9 @@
   set_fact:
     kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
 
-- name: join kubernetes secondary master node and store log
+- name: join kubernetes secondary control-plane node and store log
   block:
-  - name: join kubernetes secondary master node
+  - name: join kubernetes secondary control-plane node
     throttle: 1
     command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
     args:
@@ -49,7 +49,7 @@
       dest: /etc/kubernetes/kubeadm-join.errors
 
 # TODO: acutally check if node has registered
-- name: give the new master(s) a moment to register
+- name: give the new control-plane node(s) a moment to register
   when: kubeadm_join is changed
   pause:  # noqa 503
     seconds: 5
diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2
index 345c9bf9..345c9bf9 100644
--- a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2
index 2fa98ed6..a0f3efe7 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2
@@ -1,6 +1,6 @@
-{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3 #}
 {# #}
-apiVersion: kubeadm.k8s.io/v1beta2
+apiVersion: kubeadm.k8s.io/v1beta3
 kind: InitConfiguration
 {# TODO: this is ugly but we want to create our own token so we can #}
 {#       better control it's lifetime #}
@@ -11,10 +11,14 @@ localAPIEndpoint:
 {% if kubernetes_overlay_node_ip is defined %}
   advertiseAddress: {{ kubernetes_overlay_node_ip }}
 {% endif %}
+{% if kubernetes_network_plugin_replaces_kube_proxy %}
+skipPhases:
+- addon/kube-proxy
+{% endif %}
 nodeRegistration:
   criSocket: {{ kubernetes_cri_socket }}
 ---
-apiVersion: kubeadm.k8s.io/v1beta2
+apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ kubernetes_version }}
 clusterName: {{ kubernetes.cluster_name }}
@@ -43,8 +47,6 @@ controllerManager:
   extraArgs:
     node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}"
 scheduler: {}
-dns:
-  type: CoreDNS
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2
index a2660db2..a2660db2 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2
index 382164cb..382164cb 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2
index 382164cb..382164cb 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
index e343f4a7..e343f4a7 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
index ec30d670..ec30d670 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
diff --git a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
index d536d5a7..d536d5a7 100644
--- a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
diff --git a/roles/kubernetes/kubeadm/prune/tasks/main.yml b/roles/kubernetes/kubeadm/prune/tasks/main.yml
index 71ed0d04..45020963 100644
--- a/roles/kubernetes/kubeadm/prune/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/prune/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: remove nodes from api server
   run_once: true
-  delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+  delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}"
   loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
   command: "kubectl delete node {{ item }}"
diff --git a/roles/kubernetes/kubeadm/upgrade b/roles/kubernetes/kubeadm/upgrade
index c2f97d40..2cfa18cd 100644
--- a/roles/kubernetes/kubeadm/upgrade
+++ b/roles/kubernetes/kubeadm/upgrade
@@ -1,8 +1,8 @@
 Cluster Upgrades:
 =================
 
-primary master:
----------------
+primary control-plane node:
+---------------------------
 
 VERSION=1.23.1
 
@@ -26,8 +26,8 @@
 apt-get update && apt-get install -y "kubelet=$VERSION-00" "kubectl=$VERSION-00"
 
 kubectl uncordon $(hostname)
 
-secondary master:
------------------
+secondary control-plane node:
+-----------------------------
 
 VERSION=1.23.1
 
@@ -55,7 +55,7 @@
 apt-get update
 sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubeadm.pref
 apt-get install -y "kubeadm=$VERSION-00"
 
-@primary master: kubectl drain <node> --ignore-daemonsets --delete-emptydir-data
+@primary control-plane node: kubectl drain <node> --ignore-daemonsets --delete-emptydir-data
 
 kubeadm upgrade node
 
 sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubelet.pref
@@ -64,4 +64,4 @@
 apt-get update && apt-get install -y kubelet="$VERSION-00" "kubectl=$VERSION-00"
 
 // security updates + reboot ?
 
-@primary master: kubectl uncordon <node>
+@primary control-plane node: kubectl uncordon <node>
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/worker/tasks/main.yml
index 13937bcf..eabb7a1f 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/worker/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
-- name: join kubernetes node and store log
+- name: join kubernetes worker node and store log
   block:
-  - name: join kubernetes node
+  - name: join kubernetes worker node
     command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
     args:
       creates: /etc/kubernetes/kubelet.conf