From 768b1bf272b30a5e43a6da9bb17ae1abc9ccc288 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 7 May 2022 21:01:01 +0200 Subject: cosmetic changes --- roles/kubernetes/base/tasks/cri_docker.yml | 2 +- roles/kubernetes/base/tasks/main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'roles/kubernetes') diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml index c9598638..626395b7 100644 --- a/roles/kubernetes/base/tasks/cri_docker.yml +++ b/roles/kubernetes/base/tasks/cri_docker.yml @@ -10,7 +10,7 @@ path: /etc/systemd/system/kubelet.service.d/ state: directory -- name: install systemd snippet to make sure kubelet starts after docker +- name: install systemd snippet to make sure kubelet starts after cri-dockerd copy: content: | [Unit] diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml index 4ff976a1..d2f7ef81 100644 --- a/roles/kubernetes/base/tasks/main.yml +++ b/roles/kubernetes/base/tasks/main.yml @@ -1,5 +1,5 @@ --- -- name: check if prometheus apt component of spreadspace repo is enabled +- name: check if container apt component of spreadspace repo is enabled assert: msg: "please enable the 'container' component of spreadspace repo using 'spreadspace_apt_repo_components'" that: -- cgit v1.2.3 From c09b07327b688a6a47f523a15c1a5c29d4f476d0 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 7 May 2022 22:45:49 +0200 Subject: k8s: rename masters to control-plane nodes --- chaos-at-home/k8s-chtest.yml | 6 +- common/kubernetes-cluster-cleanup.yml | 2 +- common/kubernetes-cluster-layout.yml | 30 +-- common/kubernetes-cluster.yml | 18 +- dan/emc-ctrl.yml | 13 ++ dan/emc-master.yml | 13 -- dan/k8s-emc.yml | 6 +- inventory/group_vars/chaos-at-home/network.yml | 2 +- inventory/group_vars/k8s-chtest/vars.yml | 6 +- inventory/group_vars/k8s-emc/vars.yml | 6 +- inventory/group_vars/vmhost-sk-2019vm/vars.yml | 6 +- inventory/host_vars/ch-dione.yml | 33 +-- inventory/host_vars/ch-helene.yml | 33 +-- inventory/host_vars/ch-k8s-ctrl.yml | 72 +++++++ inventory/host_vars/ch-k8s-master.yml | 72 ------- inventory/host_vars/emc-ctrl.yml | 86 ++++++++ inventory/host_vars/emc-master.yml | 86 -------- inventory/hosts.ini | 22 +- .../templates/components.0.3.6.yml.j2 | 156 -------------- .../templates/components.0.3.7.yml.j2 | 155 -------------- .../kubeadm/base/templates/haproxy.cfg.j2 | 8 +- .../kubeadm/control-plane/tasks/main.yml | 76 +++++++ .../control-plane/tasks/net_kube-router.yml | 11 + .../kubeadm/control-plane/tasks/net_kubeguard.yml | 14 ++ .../kubeadm/control-plane/tasks/net_none.yml | 2 + .../kubeadm/control-plane/tasks/primary.yml | 131 ++++++++++++ .../kubeadm/control-plane/tasks/secondary.yml | 55 +++++ .../control-plane/templates/encryption-config.j2 | 13 ++ .../control-plane/templates/kubeadm.config.j2 | 53 +++++ .../templates/net_kube-router/config.0.4.0.yml.j2 | 235 ++++++++++++++++++++ .../templates/net_kube-router/config.1.1.1.yml.j2 | 236 +++++++++++++++++++++ .../templates/net_kube-router/config.1.4.0.yml.j2 | 236 +++++++++++++++++++++ .../net_kubeguard/kube-router.0.4.0.yml.j2 | 170 +++++++++++++++ .../net_kubeguard/kube-router.1.1.1.yml.j2 | 170 +++++++++++++++ .../control-plane/templates/node-local-dns.yml.j2 | 211 ++++++++++++++++++ roles/kubernetes/kubeadm/master/tasks/main.yml | 77 ------- .../kubeadm/master/tasks/net_kube-router.yml | 11 - .../kubeadm/master/tasks/net_kubeguard.yml | 14 -- roles/kubernetes/kubeadm/master/tasks/net_none.yml | 2 - 
.../kubeadm/master/tasks/primary-master.yml | 131 ------------ .../kubeadm/master/tasks/secondary-masters.yml | 55 ----- .../kubeadm/master/templates/encryption-config.j2 | 13 -- .../kubeadm/master/templates/kubeadm.config.j2 | 53 ----- .../templates/net_kube-router/config.0.4.0.yml.j2 | 235 -------------------- .../templates/net_kube-router/config.1.1.1.yml.j2 | 236 --------------------- .../templates/net_kube-router/config.1.4.0.yml.j2 | 236 --------------------- .../net_kubeguard/kube-router.0.4.0.yml.j2 | 170 --------------- .../net_kubeguard/kube-router.1.1.1.yml.j2 | 170 --------------- .../kubeadm/master/templates/node-local-dns.yml.j2 | 211 ------------------ roles/kubernetes/kubeadm/node/tasks/main.yml | 22 -- roles/kubernetes/kubeadm/prune/tasks/main.yml | 2 +- roles/kubernetes/kubeadm/upgrade | 12 +- roles/kubernetes/kubeadm/worker/tasks/main.yml | 22 ++ 53 files changed, 1908 insertions(+), 2208 deletions(-) create mode 100644 dan/emc-ctrl.yml delete mode 100644 dan/emc-master.yml create mode 100644 inventory/host_vars/ch-k8s-ctrl.yml delete mode 100644 inventory/host_vars/ch-k8s-master.yml create mode 100644 inventory/host_vars/emc-ctrl.yml delete mode 100644 inventory/host_vars/emc-master.yml delete mode 100644 roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2 delete mode 100644 roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/main.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/primary.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/tasks/main.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/net_none.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/primary-master.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml delete mode 100644 roles/kubernetes/kubeadm/master/templates/encryption-config.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 delete mode 100644 
roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/node/tasks/main.yml create mode 100644 roles/kubernetes/kubeadm/worker/tasks/main.yml (limited to 'roles/kubernetes') diff --git a/chaos-at-home/k8s-chtest.yml b/chaos-at-home/k8s-chtest.yml index 5f7c830e..01ba7db8 100644 --- a/chaos-at-home/k8s-chtest.yml +++ b/chaos-at-home/k8s-chtest.yml @@ -12,8 +12,8 @@ vars: kubernetes_cluster_layout: nodes_group: k8s-chtest - masters: - - ch-k8s-master + controlplane_nodes: + - ch-k8s-ctrl ### hack hack hack... - name: cook kubernetes secrets @@ -30,6 +30,6 @@ - import_playbook: ../common/kubernetes-cluster-cleanup.yml - name: install addons - hosts: _kubernetes_primary_master_ + hosts: _kubernetes_primary_controlplane_node_ roles: - role: kubernetes/addons/metrics-server diff --git a/common/kubernetes-cluster-cleanup.yml b/common/kubernetes-cluster-cleanup.yml index 5647e3d6..87e59d31 100644 --- a/common/kubernetes-cluster-cleanup.yml +++ b/common/kubernetes-cluster-cleanup.yml @@ -1,6 +1,6 @@ --- - name: check for nodes to be removed - hosts: _kubernetes_primary_master_ + hosts: _kubernetes_primary_controlplane_node_ tasks: - name: fetch list of current nodes command: kubectl get nodes -o name diff --git a/common/kubernetes-cluster-layout.yml b/common/kubernetes-cluster-layout.yml index bd972a6d..31751a8d 100644 --- a/common/kubernetes-cluster-layout.yml +++ b/common/kubernetes-cluster-layout.yml @@ -5,20 +5,20 @@ gather_facts: no run_once: yes tasks: - - name: sanity check - fail if masters are not included in nodes + - name: sanity check - fail if control-plane nodes are not included in node group assert: - msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (master and non-master)" - that: kubernetes_cluster_layout.masters | difference(ansible_play_hosts_all) | length == 0 + msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (control-plane and worker)" + that: kubernetes_cluster_layout.controlplane_nodes | difference(ansible_play_hosts_all) | length == 0 - - name: sanity check - fail if primary master is not in masters + - name: sanity check - fail if primary control-plane node is not in control-plane node list assert: - msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master" - that: kubernetes_cluster_layout.primary_master is undefined or kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters + msg: "kubernetes_cluster_layout.controlplane_nodes must include kubernetes_cluster_layout.primary_controlplane_node" + that: kubernetes_cluster_layout.primary_controlplane_node is undefined or kubernetes_cluster_layout.primary_controlplane_node in kubernetes_cluster_layout.controlplane_nodes - - name: sanity check - fail on multiple masters if no primary master is configured + - name: sanity check - fail on multiple control-plane nodes if no primary is configured assert: - msg: "for multiple masters to work you need to define kubernetes_cluster_layout.primary_master" - that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined + msg: "for multiple 
control-plane nodes to work you need to define kubernetes_cluster_layout.primary_controlplane_node" + that: (kubernetes_cluster_layout.controlplane_nodes | length) == 1 or kubernetes_cluster_layout.primary_controlplane_node is defined - name: create group for all kubernetes nodes loop: "{{ ansible_play_hosts_all }}" @@ -28,19 +28,19 @@ group: _kubernetes_nodes_ changed_when: False - - name: create group for kubernetes master nodes - loop: "{{ kubernetes_cluster_layout.masters }}" + - name: create group for kubernetes control-plane nodes + loop: "{{ kubernetes_cluster_layout.controlplane_nodes }}" add_host: name: "{{ item }}" inventory_dir: "{{ hostvars[item].inventory_dir }}" - group: _kubernetes_masters_ + group: _kubernetes_controlplane_nodes_ changed_when: False - - name: create group for kubernetes primary master + - name: create group for kubernetes primary control-plane node vars: - item: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}" + item: "{{ kubernetes_cluster_layout.primary_controlplane_node | default(kubernetes_cluster_layout.controlplane_nodes[0]) }}" add_host: name: "{{ item }}" inventory_dir: "{{ hostvars[item].inventory_dir }}" - group: _kubernetes_primary_master_ + group: _kubernetes_primary_controlplane_node_ changed_when: False diff --git a/common/kubernetes-cluster.yml b/common/kubernetes-cluster.yml index 4ee91dd3..6958db15 100644 --- a/common/kubernetes-cluster.yml +++ b/common/kubernetes-cluster.yml @@ -40,19 +40,19 @@ - role: kubernetes/base - role: kubernetes/kubeadm/base -- name: configure primary kubernetes master - hosts: _kubernetes_primary_master_ +- name: configure primary kubernetes control-plane node + hosts: _kubernetes_primary_controlplane_node_ roles: - - role: kubernetes/kubeadm/master + - role: kubernetes/kubeadm/control-plane -- name: configure secondary kubernetes masters - hosts: _kubernetes_masters_:!_kubernetes_primary_master_ +- name: configure secondary kubernetes control-plane nodes + hosts: _kubernetes_controlplane_nodes_:!_kubernetes_primary_controlplane_node_ roles: - - role: kubernetes/kubeadm/master + - role: kubernetes/kubeadm/control-plane -- name: configure kubernetes non-master nodes - hosts: _kubernetes_nodes_:!_kubernetes_masters_ +- name: configure kubernetes worker nodes + hosts: _kubernetes_nodes_:!_kubernetes_controlplane_nodes_ roles: - - role: kubernetes/kubeadm/node + - role: kubernetes/kubeadm/worker ### TODO: add node labels (ie. 
for ingress daemonset) diff --git a/dan/emc-ctrl.yml b/dan/emc-ctrl.yml new file mode 100644 index 00000000..285faaaa --- /dev/null +++ b/dan/emc-ctrl.yml @@ -0,0 +1,13 @@ +--- +- name: Basic Setup + hosts: emc-ctrl + roles: + - role: apt-repo/base + - role: core/base + - role: core/sshd/base + - role: core/zsh + - role: core/ntp + - role: apt-repo/spreadspace +# - role: monitoring/prometheus/exporter + - role: storage/lvm/groups + - role: elevate/emc-stats diff --git a/dan/emc-master.yml b/dan/emc-master.yml deleted file mode 100644 index edfc0ffe..00000000 --- a/dan/emc-master.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: Basic Setup - hosts: emc-master - roles: - - role: apt-repo/base - - role: core/base - - role: core/sshd/base - - role: core/zsh - - role: core/ntp - - role: apt-repo/spreadspace -# - role: monitoring/prometheus/exporter - - role: storage/lvm/groups - - role: elevate/emc-stats diff --git a/dan/k8s-emc.yml b/dan/k8s-emc.yml index 468919ef..75e81c8b 100644 --- a/dan/k8s-emc.yml +++ b/dan/k8s-emc.yml @@ -14,8 +14,8 @@ vars: kubernetes_cluster_layout: nodes_group: k8s-emc - masters: - - emc-master + controlplane_nodes: + - emc-ctrl ### hack hack hack... - name: cook kubernetes secrets @@ -32,6 +32,6 @@ - import_playbook: ../common/kubernetes-cluster-cleanup.yml - name: install addons - hosts: _kubernetes_primary_master_ + hosts: _kubernetes_primary_controlplane_node_ roles: - role: kubernetes/addons/metrics-server diff --git a/inventory/group_vars/chaos-at-home/network.yml b/inventory/group_vars/chaos-at-home/network.yml index 2957a24a..46564977 100644 --- a/inventory/group_vars/chaos-at-home/network.yml +++ b/inventory/group_vars/chaos-at-home/network.yml @@ -68,7 +68,7 @@ network_zones: ch-http-proxy: 8 ch-imap-proxy: 9 ch-vpn: 10 - ch-k8s-master: 20 + ch-k8s-ctrl: 20 ch-jump: 22 ch-gw-lan: 28 ch-iot: 30 diff --git a/inventory/group_vars/k8s-chtest/vars.yml b/inventory/group_vars/k8s-chtest/vars.yml index e01b996d..66824314 100644 --- a/inventory/group_vars/k8s-chtest/vars.yml +++ b/inventory/group_vars/k8s-chtest/vars.yml @@ -1,15 +1,15 @@ --- -kubernetes_version: 1.23.1 +kubernetes_version: 1.23.6 kubernetes_container_runtime: containerd kubernetes_network_plugin: kube-router kubernetes_network_plugin_version: 1.4.0 -kubernetes_network_plugin_replaces_kube_proxy: true +kubernetes_network_plugin_replaces_kube_proxy: yes kubernetes: cluster_name: chtest - dedicated_master: True + dedicated_controlplane_nodes: yes api_extra_sans: - 192.168.32.20 diff --git a/inventory/group_vars/k8s-emc/vars.yml b/inventory/group_vars/k8s-emc/vars.yml index c13e610c..b2a8fe39 100644 --- a/inventory/group_vars/k8s-emc/vars.yml +++ b/inventory/group_vars/k8s-emc/vars.yml @@ -6,10 +6,10 @@ kubernetes_network_plugin: kubeguard kubernetes: cluster_name: emc - dedicated_master: False + dedicated_controlplane_nodes: yes api_extra_sans: - 178.63.180.137 - - emc-master.elev8.at + - emc-ctrl.elev8.at pod_ip_range: 172.18.0.0/16 pod_ip_range_size: 24 @@ -37,7 +37,7 @@ kubeguard: emc-dist0: 110 ele-dione: 111 ele-helene: 112 - emc-master: 127 + emc-ctrl: 127 direct_net_zones: encoder: diff --git a/inventory/group_vars/vmhost-sk-2019vm/vars.yml b/inventory/group_vars/vmhost-sk-2019vm/vars.yml index 221fa581..41f8b9db 100644 --- a/inventory/group_vars/vmhost-sk-2019vm/vars.yml +++ b/inventory/group_vars/vmhost-sk-2019vm/vars.yml @@ -11,8 +11,7 @@ vm_host: prefix: 192.168.250.0/24 offsets: sk-torrent: 136 - emc-master: 137 -# lw-master: 137 + emc-ctrl: 137 ele-gwhetzner: 138 sgg-icecast: 141 ch-mimas: 
142 @@ -24,8 +23,7 @@ vm_host: prefix: 178.63.180.136/29 offsets: sk-torrent: 0 - emc-master: 1 -# lw-master: 1 + emc-ctrl: 1 ele-gwhetzner: 2 sgg-icecast: 5 ch-mimas: 6 diff --git a/inventory/host_vars/ch-dione.yml b/inventory/host_vars/ch-dione.yml index a3cbbe68..ef9d8657 100644 --- a/inventory/host_vars/ch-dione.yml +++ b/inventory/host_vars/ch-dione.yml @@ -48,11 +48,18 @@ blackmagic_desktopvideo_version: 12.2.2a6 blackmagic_desktopvideo_include_gui: yes -docker_pkg_provider: docker-com -docker_storage: +# docker_pkg_provider: docker-com +# docker_storage: +# type: lvm +# vg: "{{ host_name }}" +# lv: docker +# size: 10G +# fs: ext4 + +containerd_storage: type: lvm vg: "{{ host_name }}" - lv: docker + lv: containerd size: 10G fs: ext4 @@ -63,15 +70,15 @@ kubelet_storage: size: 5G fs: ext4 -kubernetes_version: 1.24.0 -kubernetes_container_runtime: docker -kubernetes_standalone_max_pods: 42 -kubernetes_standalone_cni_variant: with-portmap +# kubernetes_version: 1.24.0 +# kubernetes_container_runtime: docker +# kubernetes_standalone_max_pods: 42 +# kubernetes_standalone_cni_variant: with-portmap -rtmp_streamer_base_path: /srv/storage/streamer -rtmp_streamer_inst_name: feed -rtmp_streamer_nginx_image_version: 2022-04-29.23 -rtmp_streamer_decklink_card: "DeckLink 8K Pro (1)" -rtmp_streamer_config: - input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i'] +# rtmp_streamer_base_path: /srv/storage/streamer +# rtmp_streamer_inst_name: feed +# rtmp_streamer_nginx_image_version: 2022-04-29.23 +# rtmp_streamer_decklink_card: "DeckLink 8K Pro (1)" +# rtmp_streamer_config: +# input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i'] diff --git a/inventory/host_vars/ch-helene.yml b/inventory/host_vars/ch-helene.yml index c524bf6a..816b38f8 100644 --- a/inventory/host_vars/ch-helene.yml +++ b/inventory/host_vars/ch-helene.yml @@ -48,11 +48,18 @@ blackmagic_desktopvideo_version: 12.2.2a6 blackmagic_desktopvideo_include_gui: yes -docker_pkg_provider: docker-com -docker_storage: +# docker_pkg_provider: docker-com +# docker_storage: +# type: lvm +# vg: "{{ host_name }}" +# lv: docker +# size: 10G +# fs: ext4 + +containerd_storage: type: lvm vg: "{{ host_name }}" - lv: docker + lv: containerd size: 10G fs: ext4 @@ -63,15 +70,15 @@ kubelet_storage: size: 5G fs: ext4 -kubernetes_version: 1.24.0 -kubernetes_container_runtime: docker -kubernetes_standalone_max_pods: 42 -kubernetes_standalone_cni_variant: with-portmap +# kubernetes_version: 1.24.0 +# kubernetes_container_runtime: docker +# kubernetes_standalone_max_pods: 42 +# kubernetes_standalone_cni_variant: with-portmap -rtmp_streamer_base_path: /srv/storage/streamer -rtmp_streamer_inst_name: feed -rtmp_streamer_nginx_image_version: 2022-04-29.23 -rtmp_streamer_decklink_card: "DeckLink SDI 4K" -rtmp_streamer_config: - input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i'] +# rtmp_streamer_base_path: /srv/storage/streamer +# rtmp_streamer_inst_name: feed +# rtmp_streamer_nginx_image_version: 2022-04-29.23 +# rtmp_streamer_decklink_card: "DeckLink SDI 4K" +# rtmp_streamer_config: +# input_params: ['-f', 'decklink', '-video_input', 'sdi', '-format_code', 'Hp50', '-ac', '2', '-i'] diff --git a/inventory/host_vars/ch-k8s-ctrl.yml b/inventory/host_vars/ch-k8s-ctrl.yml new file mode 100644 index 00000000..63723000 --- /dev/null +++ b/inventory/host_vars/ch-k8s-ctrl.yml @@ -0,0 +1,72 @@ +--- +install_jumphost: ch-jump + +install: + vm: + 
memory: 4G + numcpus: 4 + autostart: True + disks: + primary: /dev/sda + scsi: + sda: + type: zfs + name: root + size: 20g + properties: + 'syncoid:sync': 'false' + system_lvm: + volumes: + - name: root + size: 3G + filesystem: ext4 + mountpoint: / + - name: var + size: 1280M + filesystem: ext4 + mountpoint: /var + - name: var+log + size: 768M + filesystem: ext4 + mountpoint: /var/log + mount_options: + - noatime + - nodev + - noexec + interfaces: + - bridge: br-svc + name: svc0 + +network: + nameservers: "{{ network_zones.svc.dns }}" + domain: "{{ host_domain }}" + systemd_link: + interfaces: "{{ install.interfaces }}" + primary: &_network_primary_ + name: svc0 + address: "{{ network_zones.svc.prefix | ipaddr(network_zones.svc.offsets[inventory_hostname]) | ipaddr('address/prefix') }}" + gateway: "{{ network_zones.svc.gateway }}" + static_routes: + - destination: "{{ network_zones.lan.prefix }}" + gateway: "{{ network_zones.svc.prefix | ipaddr(network_zones.svc.offsets['ch-gw-lan']) | ipaddr('address') }}" + interfaces: + - *_network_primary_ + + +spreadspace_apt_repo_components: + - container + + +containerd_storage: + type: lvm + vg: "{{ host_name }}" + lv: containerd + size: 7G + fs: ext4 + +kubelet_storage: + type: lvm + vg: "{{ host_name }}" + lv: kubelet + size: 5G + fs: ext4 diff --git a/inventory/host_vars/ch-k8s-master.yml b/inventory/host_vars/ch-k8s-master.yml deleted file mode 100644 index 63723000..00000000 --- a/inventory/host_vars/ch-k8s-master.yml +++ /dev/null @@ -1,72 +0,0 @@ ---- -install_jumphost: ch-jump - -install: - vm: - memory: 4G - numcpus: 4 - autostart: True - disks: - primary: /dev/sda - scsi: - sda: - type: zfs - name: root - size: 20g - properties: - 'syncoid:sync': 'false' - system_lvm: - volumes: - - name: root - size: 3G - filesystem: ext4 - mountpoint: / - - name: var - size: 1280M - filesystem: ext4 - mountpoint: /var - - name: var+log - size: 768M - filesystem: ext4 - mountpoint: /var/log - mount_options: - - noatime - - nodev - - noexec - interfaces: - - bridge: br-svc - name: svc0 - -network: - nameservers: "{{ network_zones.svc.dns }}" - domain: "{{ host_domain }}" - systemd_link: - interfaces: "{{ install.interfaces }}" - primary: &_network_primary_ - name: svc0 - address: "{{ network_zones.svc.prefix | ipaddr(network_zones.svc.offsets[inventory_hostname]) | ipaddr('address/prefix') }}" - gateway: "{{ network_zones.svc.gateway }}" - static_routes: - - destination: "{{ network_zones.lan.prefix }}" - gateway: "{{ network_zones.svc.prefix | ipaddr(network_zones.svc.offsets['ch-gw-lan']) | ipaddr('address') }}" - interfaces: - - *_network_primary_ - - -spreadspace_apt_repo_components: - - container - - -containerd_storage: - type: lvm - vg: "{{ host_name }}" - lv: containerd - size: 7G - fs: ext4 - -kubelet_storage: - type: lvm - vg: "{{ host_name }}" - lv: kubelet - size: 5G - fs: ext4 diff --git a/inventory/host_vars/emc-ctrl.yml b/inventory/host_vars/emc-ctrl.yml new file mode 100644 index 00000000..1ca011ec --- /dev/null +++ b/inventory/host_vars/emc-ctrl.yml @@ -0,0 +1,86 @@ +--- +install: + vm: + memory: 10G + numcpus: 6 + autostart: True + disks: + primary: /dev/sda + scsi: + sda: + type: zfs + name: root + size: 20g + sdb: + type: blockdev + path: /dev/zvol/storage/streamstats + system_lvm: + volumes: + - name: root + size: 3G + filesystem: ext4 + mountpoint: / + - name: var + size: 1280M + filesystem: ext4 + mountpoint: /var + - name: var+log + size: 768M + filesystem: ext4 + mountpoint: /var/log + mount_options: + - noatime + - nodev + - 
noexec + interfaces: + - bridge: br-public + name: primary0 + +network: + nameservers: "{{ vm_host.network.dns }}" + domain: "{{ host_domain }}" + systemd_link: + interfaces: "{{ install.interfaces }}" + primary: &_network_primary_ + name: primary0 + address: "{{ vm_host.network.bridges.public.prefix | ipaddr(vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address/prefix') }}" + gateway: "{{ vm_host.network.bridges.public.prefix | ipaddr(vm_host.network.bridges.public.offsets[vm_host.name]) | ipaddr('address') }}" + template: overlay + overlay: "{{ (vm_host.network.bridges.public.overlays.default.prefix | ipaddr(vm_host.network.bridges.public.overlays.default.offsets[inventory_hostname])).split('/')[0] }}" + interfaces: + - *_network_primary_ + +external_ip: "{{ network.primary.overlay }}" + + +spreadspace_apt_repo_components: + - prometheus + - container + + +containerd_storage: + type: lvm + vg: "{{ host_name }}" + lv: containerd + size: 7G + fs: ext4 + +kubelet_storage: + type: lvm + vg: "{{ host_name }}" + lv: kubelet + size: 5G + fs: ext4 + + +lvm_groups: + streamstats: + pvs: + - /dev/sdb + +emc_stats_storage: + type: lvm + vg: streamstats + lv: stats + size: 42G + fs: ext4 diff --git a/inventory/host_vars/emc-master.yml b/inventory/host_vars/emc-master.yml deleted file mode 100644 index 1ca011ec..00000000 --- a/inventory/host_vars/emc-master.yml +++ /dev/null @@ -1,86 +0,0 @@ ---- -install: - vm: - memory: 10G - numcpus: 6 - autostart: True - disks: - primary: /dev/sda - scsi: - sda: - type: zfs - name: root - size: 20g - sdb: - type: blockdev - path: /dev/zvol/storage/streamstats - system_lvm: - volumes: - - name: root - size: 3G - filesystem: ext4 - mountpoint: / - - name: var - size: 1280M - filesystem: ext4 - mountpoint: /var - - name: var+log - size: 768M - filesystem: ext4 - mountpoint: /var/log - mount_options: - - noatime - - nodev - - noexec - interfaces: - - bridge: br-public - name: primary0 - -network: - nameservers: "{{ vm_host.network.dns }}" - domain: "{{ host_domain }}" - systemd_link: - interfaces: "{{ install.interfaces }}" - primary: &_network_primary_ - name: primary0 - address: "{{ vm_host.network.bridges.public.prefix | ipaddr(vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address/prefix') }}" - gateway: "{{ vm_host.network.bridges.public.prefix | ipaddr(vm_host.network.bridges.public.offsets[vm_host.name]) | ipaddr('address') }}" - template: overlay - overlay: "{{ (vm_host.network.bridges.public.overlays.default.prefix | ipaddr(vm_host.network.bridges.public.overlays.default.offsets[inventory_hostname])).split('/')[0] }}" - interfaces: - - *_network_primary_ - -external_ip: "{{ network.primary.overlay }}" - - -spreadspace_apt_repo_components: - - prometheus - - container - - -containerd_storage: - type: lvm - vg: "{{ host_name }}" - lv: containerd - size: 7G - fs: ext4 - -kubelet_storage: - type: lvm - vg: "{{ host_name }}" - lv: kubelet - size: 5G - fs: ext4 - - -lvm_groups: - streamstats: - pvs: - - /dev/sdb - -emc_stats_storage: - type: lvm - vg: streamstats - lv: stats - size: 42G - fs: ext4 diff --git a/inventory/hosts.ini b/inventory/hosts.ini index 74e37925..581913b6 100644 --- a/inventory/hosts.ini +++ b/inventory/hosts.ini @@ -51,7 +51,7 @@ ch-calypso host_name=calypso ch-thetys host_name=thetys ch-dione host_name=dione ch-helene host_name=helene -ch-k8s-master host_name=k8s-master +ch-k8s-ctrl host_name=k8s-ctrl ch-hpws-maxi ch-hpws-mini1 ch-alix1d @@ -246,7 +246,7 @@ host_domain=elev8.at env_group=dan 
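
Aside: the address templating in the emc-ctrl and ch-k8s-ctrl host_vars above derives each host address from a zone prefix plus a per-host offset using Ansible's ipaddr filter. A minimal sketch of that pipeline with placeholder data (the prefix, offset, and debug task below are illustrative stand-ins, not values from this inventory):

- name: illustrate prefix + offset address construction (placeholder data)
  vars:
    zone_prefix: 192.0.2.0/24   # stand-in for e.g. network_zones.svc.prefix
    host_offset: 20             # stand-in for network_zones.svc.offsets[inventory_hostname]
  debug:
    msg: "{{ zone_prefix | ipaddr(host_offset) | ipaddr('address/prefix') }}"   # -> 192.0.2.20/24

Ending the chain with | ipaddr('address') instead yields the bare address (192.0.2.20), which is how the gateway values above are rendered.
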
[emc] -emc-master +emc-ctrl [emc:children] emc-dist @@ -342,7 +342,7 @@ ch-iot ch-vpn ch-mon ch-omd -ch-k8s-master +ch-k8s-ctrl ch-installsmb [vmhost-ch-prometheus] ch-prometheus @@ -364,7 +364,7 @@ sk-testvm sk-torrent ch-mimas ele-gwhetzner -emc-master +emc-ctrl sgg-icecast [vmhost-sk-2019vm] sk-2019vm @@ -468,7 +468,7 @@ emc-dist emc-xx [hetzner] -emc-master +emc-ctrl sk-testvm sk-torrent sgg-icecast @@ -548,11 +548,11 @@ emc-dist [k8s-emc-streamer:children] emc-xx -[k8s-emc-master] -emc-master +[k8s-emc-ctrl] +emc-ctrl [k8s-emc:children] -k8s-emc-master +k8s-emc-ctrl k8s-emc-encoder k8s-emc-distribution k8s-emc-streamer @@ -563,9 +563,9 @@ k8s-emc-streamer ch-dione ch-helene -[k8s-chtest-master] -ch-k8s-master +[k8s-chtest-ctrl] +ch-k8s-ctrl [k8s-chtest:children] -k8s-chtest-master +k8s-chtest-ctrl k8s-chtest-encoder diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2 deleted file mode 100644 index 1e3789bb..00000000 --- a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2 +++ /dev/null @@ -1,156 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:aggregated-metrics-reader - labels: - rbac.authorization.k8s.io/aggregate-to-view: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: -- apiGroups: ["metrics.k8s.io"] - resources: ["pods", "nodes"] - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metrics-server:system:auth-delegator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metrics-server-auth-reader - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system ---- -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.metrics.k8s.io -spec: - service: - name: metrics-server - namespace: kube-system - group: metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metrics-server - namespace: kube-system ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: metrics-server - namespace: kube-system - labels: - k8s-app: metrics-server -spec: - selector: - matchLabels: - k8s-app: metrics-server - template: - metadata: - name: metrics-server - labels: - k8s-app: metrics-server - spec: - serviceAccountName: metrics-server - volumes: - # mount in tmp so we can safely use from-scratch images and/or read-only containers - - name: tmp-dir - emptyDir: {} - containers: - - name: metrics-server - image: k8s.gcr.io/metrics-server-amd64:v0.3.6 - imagePullPolicy: IfNotPresent - args: - - --cert-dir=/tmp - - --secure-port=4443 - - --kubelet-insecure-tls - - --kubelet-preferred-address-types=InternalIP,ExternalIP - ports: - - name: main-port - containerPort: 4443 - protocol: TCP - securityContext: - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - volumeMounts: - - name: tmp-dir - mountPath: /tmp - nodeSelector: - kubernetes.io/os: 
linux - kubernetes.io/arch: "amd64" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master ---- -apiVersion: v1 -kind: Service -metadata: - name: metrics-server - namespace: kube-system - labels: - kubernetes.io/name: "Metrics-server" - kubernetes.io/cluster-service: "true" -spec: - selector: - k8s-app: metrics-server - ports: - - port: 443 - protocol: TCP - targetPort: main-port ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:metrics-server -rules: -- apiGroups: - - "" - resources: - - pods - - nodes - - nodes/stats - - namespaces - - configmaps - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:metrics-server -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:metrics-server -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2 deleted file mode 100644 index fc8d287b..00000000 --- a/roles/kubernetes/addons/metrics-server/templates/components.0.3.7.yml.j2 +++ /dev/null @@ -1,155 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:aggregated-metrics-reader - labels: - rbac.authorization.k8s.io/aggregate-to-view: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: -- apiGroups: ["metrics.k8s.io"] - resources: ["pods", "nodes"] - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metrics-server:system:auth-delegator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metrics-server-auth-reader - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system ---- -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.metrics.k8s.io -spec: - service: - name: metrics-server - namespace: kube-system - group: metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metrics-server - namespace: kube-system ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: metrics-server - namespace: kube-system - labels: - k8s-app: metrics-server -spec: - selector: - matchLabels: - k8s-app: metrics-server - template: - metadata: - name: metrics-server - labels: - k8s-app: metrics-server - spec: - serviceAccountName: metrics-server - volumes: - # mount in tmp so we can safely use from-scratch images and/or read-only containers - - name: tmp-dir - emptyDir: {} - containers: - - name: metrics-server - image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7 - imagePullPolicy: IfNotPresent - args: - - --cert-dir=/tmp - - --secure-port=4443 - - --kubelet-insecure-tls - - --kubelet-preferred-address-types=InternalIP,ExternalIP - ports: - - name: main-port - containerPort: 4443 - protocol: TCP - securityContext: - readOnlyRootFilesystem: true - 
runAsNonRoot: true - runAsUser: 1000 - volumeMounts: - - name: tmp-dir - mountPath: /tmp - nodeSelector: - kubernetes.io/os: linux - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master ---- -apiVersion: v1 -kind: Service -metadata: - name: metrics-server - namespace: kube-system - labels: - kubernetes.io/name: "Metrics-server" - kubernetes.io/cluster-service: "true" -spec: - selector: - k8s-app: metrics-server - ports: - - port: 443 - protocol: TCP - targetPort: main-port ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:metrics-server -rules: -- apiGroups: - - "" - resources: - - pods - - nodes - - nodes/stats - - namespaces - - configmaps - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:metrics-server -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:metrics-server -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 index 2e0eaf5d..19118b2e 100644 --- a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 +++ b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 @@ -16,7 +16,7 @@ defaults option dontlog-normal frontend kube_api -{% if '_kubernetes_masters_' in group_names %} +{% if '_kubernetes_controlplane_nodes_' in group_names %} bind *:6443 {% else %} bind 127.0.0.1:6443 @@ -25,7 +25,7 @@ frontend kube_api default_backend kube_api backend kube_api -{% if '_kubernetes_masters_' in group_names %} +{% if '_kubernetes_controlplane_nodes_' in group_names %} balance first {% else %} balance roundrobin @@ -36,6 +36,6 @@ backend kube_api default-server inter 5s fall 3 rise 2 timeout connect 5s timeout server 3h -{% for master in groups['_kubernetes_masters_'] %} - server {{ master }} {{ hostvars[master].kubernetes_overlay_node_ip | default(hostvars[master].ansible_default_ipv4.address) }}:6442 {% if master == inventory_hostname %}id 1{% endif %} check check-ssl verify none +{% for node in groups['_kubernetes_controlplane_nodes_'] %} + server {{ node }} {{ hostvars[node].kubernetes_overlay_node_ip | default(hostvars[node].ansible_default_ipv4.address) }}:6442 {% if node == inventory_hostname %}id 1{% endif %} check check-ssl verify none {% endfor %} diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/main.yml b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml new file mode 100644 index 00000000..d5bd378e --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml @@ -0,0 +1,76 @@ +--- +- name: create directory for encryption config + file: + name: /etc/kubernetes/encryption + state: directory + mode: 0700 + +- name: install encryption config + template: + src: encryption-config.j2 + dest: /etc/kubernetes/encryption/config + mode: 0600 + + +- name: install primary control-plane node + include_tasks: primary.yml + when: "'_kubernetes_primary_controlplane_node_' in group_names" + +- name: install secondary control-plane nodes + include_tasks: secondary.yml + when: "'_kubernetes_primary_controlplane_node_' not in group_names" + + +- name: check if control-plane node is tainted (1/2) + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json" + check_mode: no + register: kubectl_get_node + changed_when: False + +- name: check if control-plane node is tainted (2/2) + set_fact: + 
kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{% if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}" + +- name: remove taint from control-plane node + when: not kubernetes.dedicated_controlplane_nodes + block: + - name: remove control-plane taint from node + when: "'node-role.kubernetes.io/control-plane' in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane-" + + - name: remove deprecated master taint from node + when: "'node-role.kubernetes.io/master' in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-" + +- name: add taint to control-plane node + when: kubernetes.dedicated_controlplane_nodes + block: + - name: add control-plane taint to node + when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule" + + - name: add deprecated master taint to node + when: "'node-role.kubernetes.io/master' not in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule" + +- name: prepare kubectl (1/2) + file: + name: /root/.kube + state: directory + +- name: prepare kubectl (2/2) + file: + dest: /root/.kube/config + src: /etc/kubernetes/admin.conf + state: link + +- name: add kubectl completion config for shells + loop: + - zsh + - bash + blockinfile: + path: "/root/.{{ item }}rc" + create: yes + marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###" + content: | + source <(kubectl completion {{ item }}) diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml new file mode 100644 index 00000000..0a216414 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml @@ -0,0 +1,11 @@ +--- +- name: generate kube-router configuration + template: + src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2" + dest: /etc/kubernetes/network-plugin.yml + + ## TODO: move to server-side apply (GA since 1.22) +- name: install kube-router onto the cluster + command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml + register: kube_router_apply_result + changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml new file mode 100644 index 00000000..a572ca89 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml @@ -0,0 +1,14 @@ +--- +- name: install kube-router variant + when: "kubernetes_network_plugin_variant == 'with-kube-router'" + block: + - name: generate kubeguard (kube-router) configuration + template: + src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2" + dest: /etc/kubernetes/network-plugin.yml + + ## TODO: move to server-side apply (GA since 1.22) + - name: install kubeguard (kube-router) onto the cluster + command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml + register: kubeguard_apply_result + 
changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml new file mode 100644 index 00000000..bf1a16d5 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml @@ -0,0 +1,2 @@ +--- +## this "plugin" is for testing purposes only diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml new file mode 100644 index 00000000..22a5af42 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml @@ -0,0 +1,131 @@ +--- +- name: check if kubeconfig kubelet.conf already exists + stat: + path: /etc/kubernetes/kubelet.conf + register: kubeconfig_kubelet_stats + + ## TODO: switch to kubeadm config version v1beta3 (available since 1.22) +- name: generate kubeadm.config + template: + src: kubeadm.config.j2 + dest: /etc/kubernetes/kubeadm.config + register: kubeadm_config + +### cluster not yet initialized + +- name: create new cluster + when: not kubeconfig_kubelet_stats.stat.exists + block: + + #### kubeadm wants token to come from --config if --config is used + #### i think this is stupid -> TODO: send bug report + # - name: generate bootstrap token for new cluster + # command: kubeadm token generate + # changed_when: False + # check_mode: no + # register: kubeadm_token_generate + + - name: initialize kubernetes primary control-plane node and store log + block: + - name: initialize kubernetes primary control-plane node + command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" + # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" + args: + creates: /etc/kubernetes/pki/ca.crt + register: kubeadm_init + + always: + - name: dump output of kubeadm init to log file + when: kubeadm_init.changed + copy: + content: "{{ kubeadm_init.stdout }}\n" + dest: /etc/kubernetes/kubeadm-init.log + + - name: dump error output of kubeadm init to log file + when: kubeadm_init.changed and kubeadm_init.stderr + copy: + content: "{{ kubeadm_init.stderr }}\n" + dest: /etc/kubernetes/kubeadm-init.errors + + - name: create bootstrap token for existing cluster + command: kubeadm token create --ttl 42m + check_mode: no + register: kubeadm_token_generate + + +### cluster is already initialized but config has changed + +- name: upgrade cluster config + when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed + block: + + - name: fail for cluster upgrades + fail: + msg: "upgrading cluster config is currently not supported!" 
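
Aside: the kube_bootstrap_token and kube_bootstrap_ca_cert_hash facts computed below are what the kubeadm join commands consume — secondary.yml joins with --control-plane and a certificate key, while the new kubeadm/worker role (created by this commit but not shown in this excerpt) joins without them. A minimal sketch of such a worker join task, modelled on the secondary control-plane join in secondary.yml; the task name is illustrative, while the flags and the load-balanced endpoint 127.0.0.1:6443 are the ones used in this patch:

- name: join kubernetes worker node (illustrative sketch)
  command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
  args:
    creates: /etc/kubernetes/kubelet.conf   # skips the join once the node is already bootstrapped
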
+ + +### cluster is already initialized + +- name: prepare cluster for new nodes + when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed + block: + + - name: fetch list of current nodes + command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name + changed_when: False + check_mode: no + register: kubectl_node_list + + - name: save list of current nodes + set_fact: + kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" + + - name: create bootstrap token for existing cluster + when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0" + command: kubeadm token create --ttl 42m + check_mode: no + register: kubeadm_token_create + + +## calculate certificate digest + +- name: install openssl + apt: + name: openssl + state: present + +- name: get ca certificate digest + shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'" + args: + executable: /bin/bash + check_mode: no + register: kube_ca_openssl + changed_when: False + +- name: set variables needed by kubernetes nodes to join the cluster + set_fact: + kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}" + kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}" + delegate_to: "{{ item }}" + delegate_facts: True + loop: "{{ groups['_kubernetes_nodes_'] }}" + + +## install node-local-dns + +- name: generate node-local dns cache config + template: + src: node-local-dns.yml.j2 + dest: /etc/kubernetes/node-local-dns.yml + + ## TODO: move to server-side apply (GA since 1.22) +- name: install node-local dns cache + command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml + register: kube_node_local_dns_apply_result + changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 + + +## Network Plugin + +- name: install network plugin + include_tasks: "net_{{ kubernetes_network_plugin }}.yml" diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml new file mode 100644 index 00000000..a2dbe081 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml @@ -0,0 +1,55 @@ +--- +- name: fetch secrets needed for secondary control-plane node + run_once: true + delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}" + block: + + - name: fetch list of current nodes + command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name + changed_when: False + check_mode: no + register: kubectl_node_list + + - name: save list of current nodes + set_fact: + kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" + + - name: upload certs + when: "groups['_kubernetes_controlplane_nodes_'] | difference(kubernetes_current_nodes) | length > 0" + command: kubeadm init phase upload-certs --upload-certs + check_mode: no + register: kubeadm_upload_certs + + +- name: extract encryption key for certs + set_fact: + kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" + +- name: join kubernetes secondary control-plane node and store log + block: 
+ - name: join kubernetes secondary control-plane node + throttle: 1 + command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + args: + creates: /etc/kubernetes/kubelet.conf + register: kubeadm_join + + always: + - name: dump output of kubeadm join to log file + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. + copy: # noqa 503 + content: "{{ kubeadm_join.stdout }}\n" + dest: /etc/kubernetes/kubeadm-join.log + + - name: dump error output of kubeadm join to log file + when: kubeadm_join.changed and kubeadm_join.stderr + copy: + content: "{{ kubeadm_join.stderr }}\n" + dest: /etc/kubernetes/kubeadm-join.errors + + # TODO: actually check if node has registered +- name: give the new control-plane node(s) a moment to register + when: kubeadm_join is changed + pause: # noqa 503 + seconds: 5 diff --git a/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 new file mode 100644 index 00000000..345c9bf9 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 @@ -0,0 +1,13 @@ +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - secretbox: + keys: +{% for key in kubernetes_secrets.encryption_config_keys %} + - name: key{{ loop.index }} + secret: {{ key }} +{% endfor %} + - identity: {} diff --git a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 new file mode 100644 index 00000000..2fa98ed6 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 @@ -0,0 +1,53 @@ +{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #} +{# #} +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +{# TODO: this is ugly but we want to create our own token so we can #} +{# better control its lifetime #} +bootstrapTokens: +- ttl: "1s" +localAPIEndpoint: + bindPort: 6442 +{% if kubernetes_overlay_node_ip is defined %} + advertiseAddress: {{ kubernetes_overlay_node_ip }} +{% endif %} +nodeRegistration: + criSocket: {{ kubernetes_cri_socket }} +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +kubernetesVersion: {{ kubernetes_version }} +clusterName: {{ kubernetes.cluster_name }} +imageRepository: k8s.gcr.io +controlPlaneEndpoint: 127.0.0.1:6443 +networking: + dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} + podSubnet: {{ kubernetes.pod_ip_range }} + serviceSubnet: {{ kubernetes.service_ip_range }} +apiServer: + extraArgs: + encryption-provider-config: /etc/kubernetes/encryption/config + extraVolumes: + - name: encryption-config + hostPath: /etc/kubernetes/encryption + mountPath: /etc/kubernetes/encryption + readOnly: true + pathType: Directory +{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %} + certSANs: [] +{% else %} + certSANs: + {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }} +{% endif %} +controllerManager: + extraArgs: + node-cidr-mask-size: "{{ 
kubernetes.pod_ip_range_size }}" +scheduler: {} +dns: + type: CoreDNS +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +clusterDNS: +- {{ kubernetes_nodelocal_dnscache_ip }} +cgroupDriver: systemd diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 new file mode 100644 index 00000000..a2660db2 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 @@ -0,0 +1,235 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-cfg + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + cni-conf.json: | + { + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, + "hairpinMode": true, + "ipam":{ + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "snat":true, + "portMappings":true + } + } + ] + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --run-router=true + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + - --routes-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + initContainers: + - name: install-cni + image: busybox + imagePullPolicy: Always + command: + - /bin/sh + - -c + - set -e -x; + if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then + if [ -f /etc/cni/net.d/*.conf ]; then + rm -f /etc/cni/net.d/*.conf; + fi; + TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; + cp /etc/kube-router/cni-conf.json ${TMP}; + mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; + fi + volumeMounts: + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kube-router-cfg + mountPath: /etc/kube-router + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kube-router-cfg + configMap: + name: kube-router-cfg + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 new file mode 100644 index 00000000..382164cb --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 @@ -0,0 +1,236 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-cfg + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + cni-conf.json: | + { + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, + "hairpinMode": true, + "ipam":{ + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "snat":true, + "portMappings":true + } + } + ] + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + 
prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --run-router=true + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --bgp-graceful-restart=true + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + - --routes-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + initContainers: + - name: install-cni + image: busybox + imagePullPolicy: Always + command: + - /bin/sh + - -c + - set -e -x; + if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then + if [ -f /etc/cni/net.d/*.conf ]; then + rm -f /etc/cni/net.d/*.conf; + fi; + TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; + cp /etc/kube-router/cni-conf.json ${TMP}; + mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; + fi + volumeMounts: + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kube-router-cfg + mountPath: /etc/kube-router + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kube-router-cfg + configMap: + name: kube-router-cfg + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 new file mode 100644 index 00000000..382164cb --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 @@ -0,0 
+1,236 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-cfg + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + cni-conf.json: | + { + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, + "hairpinMode": true, + "ipam":{ + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "snat":true, + "portMappings":true + } + } + ] + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --run-router=true + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --bgp-graceful-restart=true + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + - --routes-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + initContainers: + - name: install-cni + image: busybox + imagePullPolicy: Always + command: + - /bin/sh + - -c + - set -e -x; + if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then + if [ -f /etc/cni/net.d/*.conf ]; then + rm -f /etc/cni/net.d/*.conf; + fi; + TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; + cp /etc/kube-router/cni-conf.json ${TMP}; + mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; + fi + volumeMounts: + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kube-router-cfg + mountPath: /etc/kube-router + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kube-router-cfg + configMap: + name: kube-router-cfg + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 new file mode 100644 index 00000000..e343f4a7 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 @@ -0,0 +1,170 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --cluster-cidr={{ kubernetes.pod_ip_range }} + - --run-router=false + - --run-firewall=true + - --run-service-proxy={{ 
kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 new file mode 100644 index 00000000..ec30d670 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 @@ -0,0 +1,170 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + 
imagePullPolicy: Always + args: + - --run-router=false + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --bgp-graceful-restart=true + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 new file mode 100644 index 00000000..d536d5a7 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 @@ -0,0 +1,211 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-local-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns-upstream + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNSUpstream" +spec: + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + selector: + k8s-app: kube-dns +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: node-local-dns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile +data: + Corefile: | + {{ kubernetes.dns_domain | default('cluster.local') }}:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop + bind {{ kubernetes_nodelocal_dnscache_ip }} + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + health {{ kubernetes_nodelocal_dnscache_ip }}:8080 + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop + bind {{ kubernetes_nodelocal_dnscache_ip }} + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + } + ip6.arpa:53 { + errors + cache 30 + reload + loop + bind {{ kubernetes_nodelocal_dnscache_ip }} + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + } + .:53 { + errors + cache 30 + reload + loop + bind {{ kubernetes_nodelocal_dnscache_ip }} + forward . __PILLAR__UPSTREAM__SERVERS__ { + force_tcp + } + prometheus :9253 + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-local-dns + namespace: kube-system + labels: + k8s-app: node-local-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + selector: + matchLabels: + k8s-app: node-local-dns + template: + metadata: + labels: + k8s-app: node-local-dns + annotations: + prometheus.io/port: "9253" + prometheus.io/scrape: "true" + spec: + priorityClassName: system-node-critical + serviceAccountName: node-local-dns + hostNetwork: true + dnsPolicy: Default # Don't use cluster DNS. 
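+      # Note (descriptive comment, derived from this manifest): dnsPolicy
+      # Default makes this hostNetwork pod resolve via the node's
+      # /etc/resolv.conf instead of ClusterFirst, which would loop back into
+      # the very node-local DNS cache this pod provides. Cluster-domain
+      # queries are still forwarded to the kube-dns-upstream Service defined
+      # earlier in this file.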
+ tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + containers: + - name: node-cache + image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0 + resources: + requests: + cpu: 25m + memory: 5Mi + args: [ "-localip", "{{ kubernetes_nodelocal_dnscache_ip }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ] + securityContext: + privileged: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9253 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + host: {{ kubernetes_nodelocal_dnscache_ip }} + path: /health + port: 8080 + initialDelaySeconds: 60 + timeoutSeconds: 5 + volumeMounts: + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - name: config-volume + mountPath: /etc/coredns + - name: kube-dns-config + mountPath: /etc/kube-dns + volumes: + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + - name: config-volume + configMap: + name: node-local-dns + items: + - key: Corefile + path: Corefile.base +--- +# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods. +# We use this to expose metrics to Prometheus. +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9253" + prometheus.io/scrape: "true" + labels: + k8s-app: node-local-dns + name: node-local-dns + namespace: kube-system +spec: + clusterIP: None + ports: + - name: metrics + port: 9253 + targetPort: 9253 + selector: + k8s-app: node-local-dns diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml deleted file mode 100644 index 04df760f..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/main.yml +++ /dev/null @@ -1,77 +0,0 @@ ---- -- name: create direcotry for encryption config - file: - name: /etc/kubernetes/encryption - state: directory - mode: 0700 - -- name: install encryption config - template: - src: encryption-config.j2 - dest: /etc/kubernetes/encryption/config - mode: 0600 - - -- name: install primary master - include_tasks: primary-master.yml - when: "'_kubernetes_primary_master_' in group_names" - -- name: install secondary masters - include_tasks: secondary-masters.yml - when: "'_kubernetes_primary_master_' not in group_names" - - -- name: check if master is tainted (1/2) - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json" - check_mode: no - register: kubectl_get_node - changed_when: False - -- name: check if master is tainted (2/2) - set_fact: - kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}" - -- name: remove taint from master/control-plane node - when: not kubernetes.dedicated_master - block: - - name: remove master taint from node - when: "'node-role.kubernetes.io/master' in kube_node_taints" - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-" - - - name: remove control-plane taint from node - when: "'node-role.kubernetes.io/control-plane' in kube_node_taints" - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} 
node-role.kubernetes.io/control-plane-" - -- name: add taint from master/control-plane node - when: kubernetes.dedicated_master - block: - - name: add master taint from node - when: "'node-role.kubernetes.io/master' not in kube_node_taints" - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule" - - ## TODO: enable this once all needed addons and workloads have tolerations set accordingly - # - name: add control-plane taint from node - # when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints" - # command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule" - -- name: prepare kubectl (1/2) - file: - name: /root/.kube - state: directory - -- name: prepare kubectl (2/2) - file: - dest: /root/.kube/config - src: /etc/kubernetes/admin.conf - state: link - -- name: add kubectl completion config for shells - loop: - - zsh - - bash - blockinfile: - path: "/root/.{{ item }}rc" - create: yes - marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###" - content: | - source <(kubectl completion {{ item }}) diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml deleted file mode 100644 index 0a216414..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: generate kube-router configuration - template: - src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2" - dest: /etc/kubernetes/network-plugin.yml - - ## TODO: move to server-side apply (GA since 1.22) -- name: install kube-router on to the cluster - command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml - register: kube_router_apply_result - changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml deleted file mode 100644 index a572ca89..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: install kube-router variant - when: "kubernetes_network_plugin_variant == 'with-kube-router'" - block: - - name: generate kubeguard (kube-router) configuration - template: - src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2" - dest: /etc/kubernetes/network-plugin.yml - - ## TODO: move to server-side apply (GA since 1.22) - - name: install kubeguard (kube-router) on to the cluster - command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml - register: kubeguard_apply_result - changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/master/tasks/net_none.yml b/roles/kubernetes/kubeadm/master/tasks/net_none.yml deleted file mode 100644 index bf1a16d5..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/net_none.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## this "plugin" is for testing purposes only diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml deleted file mode 100644 index 6fb63d09..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml +++ /dev/null @@ -1,131 +0,0 @@ ---- -- name: check if 
kubeconfig kubelet.conf already exists - stat: - path: /etc/kubernetes/kubelet.conf - register: kubeconfig_kubelet_stats - - ## TODO: switch to kubeadm config version v1beta3 (available since 1.22) -- name: generate kubeadm.config - template: - src: kubeadm.config.j2 - dest: /etc/kubernetes/kubeadm.config - register: kubeadm_config - -### cluster not yet initialized - -- name: create new cluster - when: not kubeconfig_kubelet_stats.stat.exists - block: - - #### kubeadm wants token to come from --config if --config is used - #### i think this is stupid -> TODO: send bug report - # - name: generate bootstrap token for new cluster - # command: kubeadm token generate - # changed_when: False - # check_mode: no - # register: kubeadm_token_generate - - - name: initialize kubernetes master and store log - block: - - name: initialize kubernetes master - command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" - # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" - args: - creates: /etc/kubernetes/pki/ca.crt - register: kubeadm_init - - always: - - name: dump output of kubeadm init to log file - when: kubeadm_init.changed - copy: - content: "{{ kubeadm_init.stdout }}\n" - dest: /etc/kubernetes/kubeadm-init.log - - - name: dump error output of kubeadm init to log file - when: kubeadm_init.changed and kubeadm_init.stderr - copy: - content: "{{ kubeadm_init.stderr }}\n" - dest: /etc/kubernetes/kubeadm-init.errors - - - name: create bootstrap token for existing cluster - command: kubeadm token create --ttl 42m - check_mode: no - register: kubeadm_token_generate - - -### cluster is already initialized but config has changed - -- name: upgrade cluster config - when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed - block: - - - name: fail for cluster upgrades - fail: - msg: "upgrading cluster config is currently not supported!" 
- - -### cluster is already initialized - -- name: prepare cluster for new nodes - when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed - block: - - - name: fetch list of current nodes - command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name - changed_when: False - check_mode: no - register: kubectl_node_list - - - name: save list of current nodes - set_fact: - kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" - - - name: create bootstrap token for existing cluster - when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0" - command: kubeadm token create --ttl 42m - check_mode: no - register: kubeadm_token_create - - -## calculate certificate digest - -- name: install openssl - apt: - name: openssl - state: present - -- name: get ca certificate digest - shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'" - args: - executable: /bin/bash - check_mode: no - register: kube_ca_openssl - changed_when: False - -- name: set variables needed by kubernetes/nodes to join the cluster - set_fact: - kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}" - kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}" - delegate_to: "{{ item }}" - delegate_facts: True - loop: "{{ groups['_kubernetes_nodes_'] }}" - - -## install node-local-dns - -- name: generate node-local dns cache config - template: - src: node-local-dns.yml.j2 - dest: /etc/kubernetes/node-local-dns.yml - - ## TODO: move to server-side apply (GA since 1.22) -- name: install node-local dns cache - command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml - register: kube_node_local_dns_apply_result - changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 - - -## Network Plugin - -- name: install network plugin - include_tasks: "net_{{ kubernetes_network_plugin }}.yml" diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml deleted file mode 100644 index 4759b7fd..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -- name: fetch secrets needed for secondary master - run_once: true - delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}" - block: - - - name: fetch list of current nodes - command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name - changed_when: False - check_mode: no - register: kubectl_node_list - - - name: save list of current nodes - set_fact: - kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" - - - name: upload certs - when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0" - command: kubeadm init phase upload-certs --upload-certs - check_mode: no - register: kubeadm_upload_certs - - -- name: extracting encryption key for certs - set_fact: - kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" - -- name: join kubernetes secondary master node and store log - block: - - name: join kubernetes 
secondary master node - throttle: 1 - command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" - args: - creates: /etc/kubernetes/kubelet.conf - register: kubeadm_join - - always: - - name: dump output of kubeadm join to log file - when: kubeadm_join is changed - # This is not a handler by design to make sure this action runs at this point of the play. - copy: # noqa 503 - content: "{{ kubeadm_join.stdout }}\n" - dest: /etc/kubernetes/kubeadm-join.log - - - name: dump error output of kubeadm join to log file - when: kubeadm_join.changed and kubeadm_join.stderr - copy: - content: "{{ kubeadm_join.stderr }}\n" - dest: /etc/kubernetes/kubeadm-join.errors - - # TODO: acutally check if node has registered -- name: give the new master(s) a moment to register - when: kubeadm_join is changed - pause: # noqa 503 - seconds: 5 diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 deleted file mode 100644 index 345c9bf9..00000000 --- a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 +++ /dev/null @@ -1,13 +0,0 @@ -kind: EncryptionConfiguration -apiVersion: apiserver.config.k8s.io/v1 -resources: - - resources: - - secrets - providers: - - secretbox: - keys: -{% for key in kubernetes_secrets.encryption_config_keys %} - - name: key{{ loop.index }} - secret: {{ key }} -{% endfor %} - - identity: {} diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 deleted file mode 100644 index 2fa98ed6..00000000 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 +++ /dev/null @@ -1,53 +0,0 @@ -{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #} -{# #} -apiVersion: kubeadm.k8s.io/v1beta2 -kind: InitConfiguration -{# TODO: this is ugly but we want to create our own token so we can #} -{# better control it's lifetime #} -bootstrapTokens: -- ttl: "1s" -localAPIEndpoint: - bindPort: 6442 -{% if kubernetes_overlay_node_ip is defined %} - advertiseAddress: {{ kubernetes_overlay_node_ip }} -{% endif %} -nodeRegistration: - criSocket: {{ kubernetes_cri_socket }} ---- -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -kubernetesVersion: {{ kubernetes_version }} -clusterName: {{ kubernetes.cluster_name }} -imageRepository: k8s.gcr.io -controlPlaneEndpoint: 127.0.0.1:6443 -networking: - dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} - podSubnet: {{ kubernetes.pod_ip_range }} - serviceSubnet: {{ kubernetes.service_ip_range }} -apiServer: - extraArgs: - encryption-provider-config: /etc/kubernetes/encryption/config - extraVolumes: - - name: encryption-config - hostPath: /etc/kubernetes/encryption - mountPath: /etc/kubernetes/encryption - readOnly: true - pathType: Directory -{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %} - certSANs: [] -{% else %} - certSANs: - {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }} -{% endif %} -controllerManager: - extraArgs: - node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}" -scheduler: {} -dns: - type: CoreDNS ---- -apiVersion: 
kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -clusterDNS: -- {{ kubernetes_nodelocal_dnscache_ip }} -cgroupDriver: systemd diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 deleted file mode 100644 index a2660db2..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 +++ /dev/null @@ -1,235 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-cfg - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "cniVersion":"0.3.0", - "name":"mynet", - "plugins":[ - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "hairpinMode": true, - "ipam":{ - "type":"host-local" - } - }, - { - "type":"portmap", - "capabilities":{ - "snat":true, - "portMappings":true - } - } - ] - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - - --routes-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KUBE_ROUTER_CNI_CONF_FILE - value: /etc/cni/net.d/10-kuberouter.conflist - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - initContainers: - - name: install-cni - image: busybox - imagePullPolicy: Always - command: - - /bin/sh - - -c - - set -e -x; - if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then - if [ -f /etc/cni/net.d/*.conf ]; then - rm -f /etc/cni/net.d/*.conf; - fi; - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; - fi - volumeMounts: - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kube-router-cfg - mountPath: /etc/kube-router - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: cni-conf-dir - hostPath: - path: /etc/cni/net.d - - name: kube-router-cfg - configMap: - name: kube-router-cfg - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 deleted file mode 100644 index 382164cb..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 +++ /dev/null @@ -1,236 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-cfg - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "cniVersion":"0.3.0", - "name":"mynet", - "plugins":[ - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "hairpinMode": true, - "ipam":{ - "type":"host-local" - } - }, - { - "type":"portmap", - "capabilities":{ - "snat":true, - "portMappings":true - } - } - ] - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - 
prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} - - --bgp-graceful-restart=true - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - - --routes-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KUBE_ROUTER_CNI_CONF_FILE - value: /etc/cni/net.d/10-kuberouter.conflist - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - initContainers: - - name: install-cni - image: busybox - imagePullPolicy: Always - command: - - /bin/sh - - -c - - set -e -x; - if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then - if [ -f /etc/cni/net.d/*.conf ]; then - rm -f /etc/cni/net.d/*.conf; - fi; - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; - fi - volumeMounts: - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kube-router-cfg - mountPath: /etc/kube-router - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: cni-conf-dir - hostPath: - path: /etc/cni/net.d - - name: kube-router-cfg - configMap: - name: kube-router-cfg - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 deleted file mode 100644 index 382164cb..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 +++ /dev/null @@ -1,236 +0,0 @@ 
-apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-cfg - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "cniVersion":"0.3.0", - "name":"mynet", - "plugins":[ - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "hairpinMode": true, - "ipam":{ - "type":"host-local" - } - }, - { - "type":"portmap", - "capabilities":{ - "snat":true, - "portMappings":true - } - } - ] - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} - - --bgp-graceful-restart=true - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - - --routes-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KUBE_ROUTER_CNI_CONF_FILE - value: /etc/cni/net.d/10-kuberouter.conflist - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - initContainers: - - name: install-cni - image: busybox - imagePullPolicy: Always - command: - - /bin/sh - - -c - - set -e -x; - if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then - if [ -f /etc/cni/net.d/*.conf ]; then - rm -f /etc/cni/net.d/*.conf; - fi; - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; - fi - volumeMounts: - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kube-router-cfg - mountPath: /etc/kube-router - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: cni-conf-dir - hostPath: - path: /etc/cni/net.d - - name: kube-router-cfg - configMap: - name: kube-router-cfg - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 deleted file mode 100644 index e343f4a7..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 +++ /dev/null @@ -1,170 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --cluster-cidr={{ kubernetes.pod_ip_range }} - - --run-router=false - - --run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy 
| string | lower }} - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 deleted file mode 100644 index ec30d670..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 +++ /dev/null @@ -1,170 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --run-router=false - - 
--run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} - - --bgp-graceful-restart=true - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 deleted file mode 100644 index d536d5a7..00000000 --- a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2018 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: node-local-dns - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-dns-upstream - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/name: "KubeDNSUpstream" -spec: - ports: - - name: dns - port: 53 - protocol: UDP - targetPort: 53 - - name: dns-tcp - port: 53 - protocol: TCP - targetPort: 53 - selector: - k8s-app: kube-dns ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: node-local-dns - namespace: kube-system - labels: - addonmanager.kubernetes.io/mode: Reconcile -data: - Corefile: | - {{ kubernetes.dns_domain | default('cluster.local') }}:53 { - errors - cache { - success 9984 30 - denial 9984 5 - } - reload - loop - bind {{ kubernetes_nodelocal_dnscache_ip }} - forward . __PILLAR__CLUSTER__DNS__ { - force_tcp - } - prometheus :9253 - health {{ kubernetes_nodelocal_dnscache_ip }}:8080 - } - in-addr.arpa:53 { - errors - cache 30 - reload - loop - bind {{ kubernetes_nodelocal_dnscache_ip }} - forward . __PILLAR__CLUSTER__DNS__ { - force_tcp - } - prometheus :9253 - } - ip6.arpa:53 { - errors - cache 30 - reload - loop - bind {{ kubernetes_nodelocal_dnscache_ip }} - forward . __PILLAR__CLUSTER__DNS__ { - force_tcp - } - prometheus :9253 - } - .:53 { - errors - cache 30 - reload - loop - bind {{ kubernetes_nodelocal_dnscache_ip }} - forward . __PILLAR__UPSTREAM__SERVERS__ { - force_tcp - } - prometheus :9253 - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: node-local-dns - namespace: kube-system - labels: - k8s-app: node-local-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -spec: - updateStrategy: - rollingUpdate: - maxUnavailable: 10% - selector: - matchLabels: - k8s-app: node-local-dns - template: - metadata: - labels: - k8s-app: node-local-dns - annotations: - prometheus.io/port: "9253" - prometheus.io/scrape: "true" - spec: - priorityClassName: system-node-critical - serviceAccountName: node-local-dns - hostNetwork: true - dnsPolicy: Default # Don't use cluster DNS. 
- tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - - effect: "NoExecute" - operator: "Exists" - - effect: "NoSchedule" - operator: "Exists" - containers: - - name: node-cache - image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0 - resources: - requests: - cpu: 25m - memory: 5Mi - args: [ "-localip", "{{ kubernetes_nodelocal_dnscache_ip }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ] - securityContext: - privileged: true - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - - containerPort: 9253 - name: metrics - protocol: TCP - livenessProbe: - httpGet: - host: {{ kubernetes_nodelocal_dnscache_ip }} - path: /health - port: 8080 - initialDelaySeconds: 60 - timeoutSeconds: 5 - volumeMounts: - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - name: config-volume - mountPath: /etc/coredns - - name: kube-dns-config - mountPath: /etc/kube-dns - volumes: - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - - name: kube-dns-config - configMap: - name: kube-dns - optional: true - - name: config-volume - configMap: - name: node-local-dns - items: - - key: Corefile - path: Corefile.base ---- -# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods. -# We use this to expose metrics to Prometheus. -apiVersion: v1 -kind: Service -metadata: - annotations: - prometheus.io/port: "9253" - prometheus.io/scrape: "true" - labels: - k8s-app: node-local-dns - name: node-local-dns - namespace: kube-system -spec: - clusterIP: None - ports: - - name: metrics - port: 9253 - targetPort: 9253 - selector: - k8s-app: node-local-dns diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml deleted file mode 100644 index 13937bcf..00000000 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: join kubernetes node and store log - block: - - name: join kubernetes node - command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" - args: - creates: /etc/kubernetes/kubelet.conf - register: kubeadm_join - - always: - - name: dump output of kubeadm join to log file - when: kubeadm_join is changed - # This is not a handler by design to make sure this action runs at this point of the play. 
- copy: # noqa 503 - content: "{{ kubeadm_join.stdout }}\n" - dest: /etc/kubernetes/kubeadm-join.log - - - name: dump error output of kubeadm join to log file - when: kubeadm_join.changed and kubeadm_join.stderr - copy: - content: "{{ kubeadm_join.stderr }}\n" - dest: /etc/kubernetes/kubeadm-join.errors diff --git a/roles/kubernetes/kubeadm/prune/tasks/main.yml b/roles/kubernetes/kubeadm/prune/tasks/main.yml index 71ed0d04..45020963 100644 --- a/roles/kubernetes/kubeadm/prune/tasks/main.yml +++ b/roles/kubernetes/kubeadm/prune/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: remove nodes from api server run_once: true - delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}" + delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}" loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}" command: "kubectl delete node {{ item }}" diff --git a/roles/kubernetes/kubeadm/upgrade b/roles/kubernetes/kubeadm/upgrade index c2f97d40..2cfa18cd 100644 --- a/roles/kubernetes/kubeadm/upgrade +++ b/roles/kubernetes/kubeadm/upgrade @@ -1,8 +1,8 @@ Cluster Upgrades: ================= -primary master: ---------------- +primary control-plane node: +--------------------------- VERSION=1.23.1 @@ -26,8 +26,8 @@ apt-get update && apt-get install -y "kubelet=$VERSION-00" "kubectl=$VERSION-00" kubectl uncordon $(hostname) -secondary master: ------------------ +secondary control-plane node: +----------------------------- VERSION=1.23.1 @@ -55,7 +55,7 @@ apt-get update sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubeadm.pref apt-get install -y "kubeadm=$VERSION-00" -@primary master: kubectl drain --ignore-daemonsets --delete-emptydir-data +@primary control-plane node: kubectl drain --ignore-daemonsets --delete-emptydir-data kubeadm upgrade node sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubelet.pref @@ -64,4 +64,4 @@ apt-get update && apt-get install -y kubelet="$VERSION-00" "kubectl=$VERSION-00" // security updates + reboot ? -@primary master: kubectl uncordon +@primary control-plane node: kubectl uncordon diff --git a/roles/kubernetes/kubeadm/worker/tasks/main.yml b/roles/kubernetes/kubeadm/worker/tasks/main.yml new file mode 100644 index 00000000..eabb7a1f --- /dev/null +++ b/roles/kubernetes/kubeadm/worker/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: join kubernetes worker node and store log + block: + - name: join kubernetes worker node + command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" + args: + creates: /etc/kubernetes/kubelet.conf + register: kubeadm_join + + always: + - name: dump output of kubeadm join to log file + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. 
+ copy: # noqa 503 + content: "{{ kubeadm_join.stdout }}\n" + dest: /etc/kubernetes/kubeadm-join.log + + - name: dump error output of kubeadm join to log file + when: kubeadm_join.changed and kubeadm_join.stderr + copy: + content: "{{ kubeadm_join.stderr }}\n" + dest: /etc/kubernetes/kubeadm-join.errors -- cgit v1.2.3 From 09c8120540735c22316a55593f4c56bcd6ae7e88 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 8 May 2022 01:08:36 +0200 Subject: add support for cluster with kubernetes 1.24 --- inventory/group_vars/k8s-chtest/vars.yml | 2 +- roles/kubernetes/kubeadm/control-plane/tasks/primary.yml | 6 +++--- .../kubeadm/control-plane/templates/kubeadm.config.j2 | 12 +++++++----- 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'roles/kubernetes') diff --git a/inventory/group_vars/k8s-chtest/vars.yml b/inventory/group_vars/k8s-chtest/vars.yml index 66824314..939d93da 100644 --- a/inventory/group_vars/k8s-chtest/vars.yml +++ b/inventory/group_vars/k8s-chtest/vars.yml @@ -1,5 +1,5 @@ --- -kubernetes_version: 1.23.6 +kubernetes_version: 1.24.0 kubernetes_container_runtime: containerd kubernetes_network_plugin: kube-router kubernetes_network_plugin_version: 1.4.0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml index 22a5af42..450c3a1a 100644 --- a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml +++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml @@ -28,8 +28,8 @@ - name: initialize kubernetes primary control-plane node and store log block: - name: initialize kubernetes primary control-plane node - command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" - # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" + command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }} --skip-token-print" + # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" args: creates: /etc/kubernetes/pki/ca.crt register: kubeadm_init @@ -47,7 +47,7 @@ content: "{{ kubeadm_init.stderr }}\n" dest: /etc/kubernetes/kubeadm-init.errors - - name: create bootstrap token for existing cluster + - name: create bootstrap token for new cluster command: kubeadm token create --ttl 42m check_mode: no register: kubeadm_token_generate diff --git a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 index 2fa98ed6..a0f3efe7 100644 --- a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 +++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 @@ -1,6 +1,6 @@ -{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #} +{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3 #} {# #} -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: InitConfiguration {# TODO: this is ugly but we want to create our own token so we can #} {# better control it's lifetime #} @@ -11,10 +11,14 @@ localAPIEndpoint: 
{% if kubernetes_overlay_node_ip is defined %} advertiseAddress: {{ kubernetes_overlay_node_ip }} {% endif %} +{% if kubernetes_network_plugin_replaces_kube_proxy %} +skipPhases: +- addon/kube-proxy +{% endif %} nodeRegistration: criSocket: {{ kubernetes_cri_socket }} --- -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: {{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} @@ -43,8 +47,6 @@ controllerManager: extraArgs: node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}" scheduler: {} -dns: - type: CoreDNS --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration -- cgit v1.2.3 From 92344ddc3e2181623f77f3118605323dba659c1a Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 8 May 2022 01:16:07 +0200 Subject: upgrade kubernetes cluster addon: metrics-server --- inventory/group_vars/k8s-chtest/vars.yml | 2 +- .../templates/components.0.6.1.yml.j2 | 197 +++++++++++++++++++++ 2 files changed, 198 insertions(+), 1 deletion(-) create mode 100644 roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2 (limited to 'roles/kubernetes') diff --git a/inventory/group_vars/k8s-chtest/vars.yml b/inventory/group_vars/k8s-chtest/vars.yml index 939d93da..ac1a3991 100644 --- a/inventory/group_vars/k8s-chtest/vars.yml +++ b/inventory/group_vars/k8s-chtest/vars.yml @@ -21,4 +21,4 @@ kubernetes: kubernetes_secrets: encryption_config_keys: "{{ vault_kubernetes_encryption_config_keys }}" -kubernetes_metrics_server_version: 0.5.2 +kubernetes_metrics_server_version: 0.6.1 diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2 new file mode 100644 index 00000000..7b427254 --- /dev/null +++ b/roles/kubernetes/addons/metrics-server/templates/components.0.6.1.yml.j2 @@ -0,0 +1,197 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: +- apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: +- apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-insecure-tls + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 -- cgit v1.2.3 From 05e65f43df9c502eb764b184a66dd1ef5a76685c Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 8 May 2022 01:55:09 +0200 Subject: k8s/kubeadm: fix some minor TODOs --- roles/kubernetes/addons/metrics-server/tasks/main.yml | 11 ++++++++--- .../kubeadm/control-plane/tasks/net_kube-router.yml | 11 ++++++++--- .../kubeadm/control-plane/tasks/net_kubeguard.yml | 11 ++++++++--- roles/kubernetes/kubeadm/control-plane/tasks/primary.yml | 14 +++++++++----- 4 files changed, 33 insertions(+), 14 deletions(-) (limited to 'roles/kubernetes') diff --git a/roles/kubernetes/addons/metrics-server/tasks/main.yml b/roles/kubernetes/addons/metrics-server/tasks/main.yml index 5236e4e3..87c57346 100644 --- a/roles/kubernetes/addons/metrics-server/tasks/main.yml +++ b/roles/kubernetes/addons/metrics-server/tasks/main.yml @@ -9,8 +9,13 @@ src: "components.{{ kubernetes_metrics_server_version }}.yml.j2" dest: /etc/kubernetes/addons/metrics-server/config.yml - ## TODO: move to server-side apply (GA since 1.22) +- name: check if metrics-server is already installed + check_mode: no + command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/addons/metrics-server/config.yml + failed_when: false + changed_when: false + register: kube_metrics_server_diff_result + - name: install 
metrics-server onto the cluster + when: kube_metrics_server_diff_result.rc != 0 command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/addons/metrics-server/config.yml - register: kube_metrics_server_apply_result - changed_when: (kube_metrics_server_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml index 0a216414..4584e583 100644 --- a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml @@ -4,8 +4,13 @@ src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2" dest: /etc/kubernetes/network-plugin.yml - ## TODO: move to server-side apply (GA since 1.22) +- name: check if kube-router is already installed + check_mode: no + command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml + failed_when: false + changed_when: false + register: kube_router_diff_result + - name: install kube-router on to the cluster + when: kube_router_diff_result.rc != 0 command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml - register: kube_router_apply_result - changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml index a572ca89..66dac49b 100644 --- a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml @@ -7,8 +7,13 @@ src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2" dest: /etc/kubernetes/network-plugin.yml - ## TODO: move to server-side apply (GA since 1.22) + - name: check if kubeguard (kube-router) is already installed + check_mode: no + command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml + failed_when: false + changed_when: false + register: kubeguard_diff_result + - name: install kubeguard (kube-router) on to the cluster + when: kubeguard_diff_result.rc != 0 command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml - register: kubeguard_apply_result - changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml index 450c3a1a..65a6f7c8 100644 --- a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml +++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml @@ -4,7 +4,6 @@ path: /etc/kubernetes/kubelet.conf register: kubeconfig_kubelet_stats - ## TODO: switch to kubeadm config version v1beta3 (available since 1.22) - name: generate kubeadm.config template: src: kubeadm.config.j2 @@ -118,11 +117,16 @@ src: node-local-dns.yml.j2 dest: /etc/kubernetes/node-local-dns.yml - ## TODO: move to server-side apply (GA since 1.22) -- name: install node-local dns cache +- name: check if node-local dns cache is already installed + check_mode: no + command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/node-local-dns.yml + failed_when: false + changed_when: false + register: kube_node_local_dns_diff_result + +- name: install node-local dns cache + 
when: kube_node_local_dns_diff_result.rc != 0 command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml - register: kube_node_local_dns_apply_result - changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 ## Network Plugin -- cgit v1.2.3 From 40f958ce64fc08b5fb35aac3f05941fe4b514ec5 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 8 May 2022 02:17:33 +0200 Subject: kubernetes/kubeadm: fix kubeguard network plugin --- inventory/group_vars/k8s-emc/vars.yml | 4 ++-- roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml | 4 ++-- .../kubeadm/base/templates/net_kubeguard/cni.conflist.j2 | 16 ++++++++++++++++ .../kubeadm/base/templates/net_kubeguard/cni.json.j2 | 12 ------------ 4 files changed, 20 insertions(+), 16 deletions(-) create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 delete mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 (limited to 'roles/kubernetes') diff --git a/inventory/group_vars/k8s-emc/vars.yml b/inventory/group_vars/k8s-emc/vars.yml index b2a8fe39..be1c4818 100644 --- a/inventory/group_vars/k8s-emc/vars.yml +++ b/inventory/group_vars/k8s-emc/vars.yml @@ -1,5 +1,5 @@ --- -kubernetes_version: 1.23.1 +kubernetes_version: 1.24.0 kubernetes_container_runtime: containerd kubernetes_network_plugin: kubeguard @@ -48,4 +48,4 @@ kubeguard: kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}" -kubernetes_metrics_server_version: 0.5.2 +kubernetes_metrics_server_version: 0.6.1 diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml index 40cee3b7..350ecdee 100644 --- a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml @@ -87,8 +87,8 @@ - name: install cni config template: - src: net_kubeguard/cni.json.j2 - dest: /etc/cni/net.d/kubeguard.conf + src: net_kubeguard/cni.conflist.j2 + dest: /etc/cni/net.d/kubeguard.conflist - name: install packages needed for debugging kube-router when: kubernetes_network_plugin_variant == 'with-kube-router' diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 new file mode 100644 index 00000000..240d86ef --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 @@ -0,0 +1,16 @@ +{ + "cniVersion": "0.3.1", + "name": "kubeguard", + "plugins": [ + { + "type": "bridge", + "bridge": "kubeguard-br0", + "isDefaultGateway": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}" + } + } + ] +} diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 deleted file mode 100644 index eb9e3d61..00000000 --- a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cniVersion": "0.3.1", - "name": "kubeguard", - "type": "bridge", - "bridge": "kubeguard-br0", - "isDefaultGateway": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 
kubeguard.node_index[inventory_hostname]) }}" - } -} -- cgit v1.2.3
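
Idempotent addon deployment:
----------------------------

The "fix some minor TODOs" commit replaces `changed_when` string-matching on
`kubectl apply` output with a `kubectl diff` pre-check. `kubectl diff` exits 0
when the live objects already match the manifest, 1 when they differ (or do
not exist yet), and greater than 1 on hard errors; `failed_when: false` keeps
the pre-check from aborting the play, and the `rc != 0` guard deliberately
falls through to `kubectl apply` on hard errors as well, so apply's own error
reporting surfaces the failure. A minimal standalone sketch of the same
pattern — the manifest path and register name are illustrative only, not
taken from the repository:

---
- name: check if the example manifest is already applied
  check_mode: no
  command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/example.yml
  failed_when: false     # rc 1 only means "differences were found"
  changed_when: false    # the pre-check itself never changes cluster state
  register: example_diff_result

- name: apply the example manifest only when it differs from the live state
  when: example_diff_result.rc != 0
  command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/example.yml

The removed `## TODO: move to server-side apply (GA since 1.22)` comments
pointed at `kubectl apply --server-side` as an alternative route to accurate
change detection.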