From 4fd722c4c12b441d0857c5bc29d1cd43df64b9b7 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Tue, 26 May 2020 18:08:10 +0200
Subject: finalize handling of network plugins. (needs testing)

---
 roles/kubernetes/base/tasks/main.yml                        |  4 ++--
 roles/kubernetes/kubeadm/base/tasks/main.yml                | 12 ++++++++++--
 roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml       |  7 +++++++
 roles/kubernetes/kubeadm/base/tasks/net_none.yml            |  7 +++++++
 roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml     |  3 +++
 roles/kubernetes/kubeadm/master/tasks/net_none.yml          |  2 ++
 roles/kubernetes/kubeadm/master/tasks/primary-master.yml    | 14 ++++++++++----
 .../kubernetes/kubeadm/master/tasks/secondary-masters.yml   |  6 ++++++
 roles/kubernetes/kubeadm/node/tasks/main.yml                |  6 ++++++
 roles/kubernetes/kubeadm/reset/tasks/main.yml               |  4 ++++
 10 files changed, 57 insertions(+), 8 deletions(-)
 create mode 100644 roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
 create mode 100644 roles/kubernetes/kubeadm/base/tasks/net_none.yml
 create mode 100644 roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
 create mode 100644 roles/kubernetes/kubeadm/master/tasks/net_none.yml

(limited to 'roles/kubernetes')

diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 602266d5..da5f7408 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -46,12 +46,12 @@
     {% endif %}
     source <(crictl completion)
 
-- name: add dummy group with gid 998
+- name: add dummy group with gid 990
   group:
     name: app
     gid: 990
 
-- name: add dummy user with uid 998
+- name: add dummy user with uid 990
   user:
     name: app
     uid: 990
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 2d2bd324..7f2e02c2 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -59,5 +59,13 @@
       content: |
         alias hatop="hatop -s /var/run/haproxy/admin.sock"
 
-# - name: prepare network plugin
-#   include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
+## loading the modules temporarly because kubeadm will complain if they are not there
+# but i don't think it is necessary to make this persistent, also ignoring changes here
+- name: load module br_netfilter to satisfy kubeadm init/join
+  modprobe:
+    name: br_netfilter
+    state: present
+  changed_when: false
+
+- name: prepare network plugin
+  include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..0924c458
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
@@ -0,0 +1,7 @@
+---
+- name: make sure kubernetes_network_plugin_replaces_kube_proxy is not set
+  run_once: yes
+  assert:
+    msg: "this network plugin can not replace kube-proxy please set kubernetes_network_plugin_replaces_kube_proxy to false."
+    that:
+      - not kubernetes_network_plugin_replaces_kube_proxy
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_none.yml b/roles/kubernetes/kubeadm/base/tasks/net_none.yml
new file mode 100644
index 00000000..0924c458
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/tasks/net_none.yml
@@ -0,0 +1,7 @@
+---
+- name: make sure kubernetes_network_plugin_replaces_kube_proxy is not set
+  run_once: yes
+  assert:
+    msg: "this network plugin can not replace kube-proxy please set kubernetes_network_plugin_replaces_kube_proxy to false."
+    that:
+      - not kubernetes_network_plugin_replaces_kube_proxy
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..95fee7c8
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
@@ -0,0 +1,3 @@
+---
+### kubeguard needs to be deployed before the cluster has been initialized.
+### there is nothing more todo here.
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_none.yml b/roles/kubernetes/kubeadm/master/tasks/net_none.yml
new file mode 100644
index 00000000..bf1a16d5
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/net_none.yml
@@ -0,0 +1,2 @@
+---
+## this "plugin" is for testing purposes only
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index f24e9ac1..432f7479 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -27,8 +27,8 @@
 - name: initialize kubernetes master and store log
   block:
   - name: initialize kubernetes master
-    command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
-    # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+    command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+    # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
     args:
       creates: /etc/kubernetes/pki/ca.crt
     register: kubeadm_init
@@ -40,6 +40,12 @@
       content: "{{ kubeadm_init.stdout }}\n"
       dest: /etc/kubernetes/kubeadm-init.log
 
+  - name: dump error output of kubeadm init to log file
+    when: kubeadm_init.changed and kubeadm_init.stderr
+    copy:
+      content: "{{ kubeadm_init.stderr }}\n"
+      dest: /etc/kubernetes/kubeadm-init.errors
+
   - name: create bootstrap token for existing cluster
     command: kubeadm token create --ttl 42m
     check_mode: no
@@ -119,5 +125,5 @@
 
 ## Network Plugin
 
-# - name: install network plugin
-#   include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
+- name: install network plugin
+  include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index 31fb31d6..37f108a7 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -42,6 +42,12 @@
       content: "{{ kubeadm_join.stdout }}\n"
      dest: /etc/kubernetes/kubeadm-join.log
 
+  - name: dump error output of kubeadm join to log file
+    when: kubeadm_join.changed and kubeadm_join.stderr
+    copy:
+      content: "{{ kubeadm_join.stderr }}\n"
+      dest: /etc/kubernetes/kubeadm-join.errors
+
   # TODO: acutally check if node has registered
   - name: give the new master(s) a moment to register
     when: kubeadm_join is changed
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index 655b1b18..6b3d18ae 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -14,3 +14,9 @@
     copy: # noqa 503
       content: "{{ kubeadm_join.stdout }}\n"
       dest: /etc/kubernetes/kubeadm-join.log
+
+  - name: dump error output of kubeadm join to log file
+    when: kubeadm_join.changed and kubeadm_join.stderr
+    copy:
+      content: "{{ kubeadm_join.stderr }}\n"
+      dest: /etc/kubernetes/kubeadm-join.errors
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index c35e2bfc..1ecf1c1e 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -6,9 +6,13 @@
   loop:
     - /etc/kubernetes/kubeadm.config
    - /etc/kubernetes/kubeadm-init.log
+    - /etc/kubernetes/kubeadm-init.errors
    - /etc/kubernetes/kubeadm-join.log
+    - /etc/kubernetes/kubeadm-join.errors
    - /etc/kubernetes/pki
    - /etc/kubernetes/encryption
+    - /etc/kubernetes/network-plugin.yml
+    - /etc/kubernetes/node-local-dns.yml
   file:
     path: "{{ item }}"
     state: absent
-- 
cgit v1.2.3


From 9fdd190e0ac9ac5b82c40c2f90aa25a3c0c0b0ae Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Wed, 17 Jun 2020 00:13:46 +0200
Subject: kubernetes: install addon configs into common sub-dir

---
 roles/kubernetes/addons/metrics-server/tasks/main.yml | 9 +++++++--
 roles/kubernetes/kubeadm/reset/tasks/main.yml         | 1 +
 2 files changed, 8 insertions(+), 2 deletions(-)

(limited to 'roles/kubernetes')

diff --git a/roles/kubernetes/addons/metrics-server/tasks/main.yml b/roles/kubernetes/addons/metrics-server/tasks/main.yml
index e09106c1..fb725a87 100644
--- a/roles/kubernetes/addons/metrics-server/tasks/main.yml
+++ b/roles/kubernetes/addons/metrics-server/tasks/main.yml
@@ -1,10 +1,15 @@
 ---
+- name: create base directory for metrics-server addon
+  file:
+    path: /etc/kubernetes/addons/metrics-server
+    state: directory
+
 - name: copy config for metrics-server
   template:
     src: "components.{{ kubernetes_metrics_server_version }}.yml.j2"
-    dest: /etc/kubernetes/metrics-server.yml
+    dest: /etc/kubernetes/addons/metrics-server/config.yml
 
 - name: install metrics-server onto the cluster
-  command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/metrics-server.yml
+  command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/addons/metrics-server/config.yml
   register: kube_metrics_server_apply_result
   changed_when: (kube_metrics_server_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index 1ecf1c1e..57a3faf4 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -13,6 +13,7 @@
    - /etc/kubernetes/encryption
    - /etc/kubernetes/network-plugin.yml
    - /etc/kubernetes/node-local-dns.yml
+    - /etc/kubernetes/addons
   file:
     path: "{{ item }}"
     state: absent
-- 
cgit v1.2.3


From 75372fa6a0e84ca8f3a6d6da4a9b389da4ba0b86 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Wed, 17 Jun 2020 11:57:05 +0200
Subject: kubernetes: add chaos-at-home test
cluster and fix some bugs --- chaos-at-home/group_vars/k8s-chtest.yml | 10 +++++ chaos-at-home/k8s-chtest.yml | 37 +++++++++++++++++ dan/k8s-emc.yml | 4 +- inventory/group_vars/k8s-chtest/main.yml | 41 +++++++++++++++++++ inventory/host_vars/ch-hroottest.yml | 3 ++ inventory/host_vars/ch-k8s-m0.yml | 47 ++++++++++++++++++++++ inventory/host_vars/ch-k8s-m1.yml | 47 ++++++++++++++++++++++ inventory/host_vars/ch-k8s-m2.yml | 44 ++++++++++++++++++++ inventory/host_vars/ch-k8s-w0.yml | 44 ++++++++++++++++++++ inventory/host_vars/ch-k8s-w1.yml | 44 ++++++++++++++++++++ inventory/host_vars/sk-2019vm.yml | 4 ++ inventory/hosts.ini | 38 +++++++++++++---- .../node/templates/kubeguard-peer.service.j2 | 2 +- spreadspace/k8s-lwl.yml | 4 +- 14 files changed, 359 insertions(+), 10 deletions(-) create mode 100644 chaos-at-home/group_vars/k8s-chtest.yml create mode 100644 chaos-at-home/k8s-chtest.yml create mode 100644 inventory/group_vars/k8s-chtest/main.yml create mode 100644 inventory/host_vars/ch-k8s-m0.yml create mode 100644 inventory/host_vars/ch-k8s-m1.yml create mode 100644 inventory/host_vars/ch-k8s-m2.yml create mode 100644 inventory/host_vars/ch-k8s-w0.yml create mode 100644 inventory/host_vars/ch-k8s-w1.yml (limited to 'roles/kubernetes') diff --git a/chaos-at-home/group_vars/k8s-chtest.yml b/chaos-at-home/group_vars/k8s-chtest.yml new file mode 100644 index 00000000..b824f9dd --- /dev/null +++ b/chaos-at-home/group_vars/k8s-chtest.yml @@ -0,0 +1,10 @@ +$ANSIBLE_VAULT;1.2;AES256;chaos-at-home +64343538336637373635323961366666663233376166326663316362346135353465363432616462 +6530623534623435366466656163343436333064316434650a333232643966653634663531396138 +66643633656133396139353565313834653165353331386637316664383237393237633232393337 +3363626365306538380a333361613761343263356639656632633030626265653730393232653165 +32303034393934303538386664616366613339316265653734656562303232396234623733316532 +32313837623163633663633635396664313732323939663633613238303436656534336432363433 +32623863373239326133303932336361366164383462633730653934333830346636616630356666 +37636638666332393639353738623135313331336166333435363063373733313437613264323138 +39373564363637323034373636323430323437623636623935396237323263383362 diff --git a/chaos-at-home/k8s-chtest.yml b/chaos-at-home/k8s-chtest.yml new file mode 100644 index 00000000..e3daf681 --- /dev/null +++ b/chaos-at-home/k8s-chtest.yml @@ -0,0 +1,37 @@ +--- +- name: Basic Node Setup + hosts: k8s-chtest + roles: + - role: apt-repo/base + - role: core/base + - role: core/sshd + - role: core/zsh + +- import_playbook: ../common/kubernetes-cluster-layout.yml + vars: + kubernetes_cluster_layout: + nodes_group: k8s-chtest + masters: + - ch-k8s-m0 + - ch-k8s-m1 + - ch-k8s-m2 + primary_master: ch-k8s-m0 + +### hack hack hack... +- name: cook kubernetes secrets + hosts: _kubernetes_nodes_ + gather_facts: no + tasks: + - set_fact: + kubernetes_secrets_cooked: "{{ kubernetes_secrets }}" + - when: external_ip is defined + set_fact: + external_ip_cooked: "{{ external_ip }}" + +- import_playbook: ../common/kubernetes-cluster.yml +- import_playbook: ../common/kubernetes-cluster-cleanup.yml + +- name: install addons + hosts: _kubernetes_primary_master_ + roles: + - role: kubernetes/addons/metrics-server diff --git a/dan/k8s-emc.yml b/dan/k8s-emc.yml index 7b4e9158..d81d8358 100644 --- a/dan/k8s-emc.yml +++ b/dan/k8s-emc.yml @@ -16,11 +16,13 @@ ### hack hack hack... 
- name: cook kubernetes secrets - hosts: _kubernetes_masters_ + hosts: _kubernetes_nodes_ gather_facts: no tasks: - set_fact: kubernetes_secrets_cooked: "{{ kubernetes_secrets }}" + - when: external_ip is defined + set_fact: external_ip_cooked: "{{ external_ip }}" - import_playbook: ../common/kubernetes-cluster.yml diff --git a/inventory/group_vars/k8s-chtest/main.yml b/inventory/group_vars/k8s-chtest/main.yml new file mode 100644 index 00000000..2e5d56d3 --- /dev/null +++ b/inventory/group_vars/k8s-chtest/main.yml @@ -0,0 +1,41 @@ +--- +docker_pkg_provider: docker-com +docker_pkg_name: docker-ce + +kubernetes_version: 1.18.3 +kubernetes_container_runtime: docker +kubernetes_network_plugin: kubeguard + +kubernetes: + cluster_name: chtest + + dedicated_master: True + api_extra_sans: + - 178.63.180.139 + - 178.63.180.140 + + pod_ip_range: 172.18.0.0/16 + pod_ip_range_size: 24 + service_ip_range: 172.18.192.0/18 + + +kubernetes_secrets: + encryption_config_keys: "{{ vault_kubernetes_encryption_config_keys }}" + + +kubeguard: + ## node_index must be in the range between 1 and 190 -> 189 hosts possible + ## + ## hardcoded hostnames are not nice but if we do this via host_vars + ## the info is spread over multiple files and this makes it more diffcult + ## to find mistakes, so it is nicer to keep it in one place... + node_index: + ch-k8s-w0: 1 + ch-k8s-w1: 2 + ch-k8s-m0: 100 + ch-k8s-m1: 101 + ch-k8s-m2: 102 + +kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}" + +kubernetes_metrics_server_version: 0.3.6 diff --git a/inventory/host_vars/ch-hroottest.yml b/inventory/host_vars/ch-hroottest.yml index c56845fa..555791ca 100644 --- a/inventory/host_vars/ch-hroottest.yml +++ b/inventory/host_vars/ch-hroottest.yml @@ -44,6 +44,9 @@ vm_host: offsets: ch-hroottest-vm1: 100 ch-hroottest-obsd: 101 + ch-k8s-m2: 200 + ch-k8s-w0: 210 + ch-k8s-w1: 211 nat: yes zfs: default: diff --git a/inventory/host_vars/ch-k8s-m0.yml b/inventory/host_vars/ch-k8s-m0.yml new file mode 100644 index 00000000..30239ab3 --- /dev/null +++ b/inventory/host_vars/ch-k8s-m0.yml @@ -0,0 +1,47 @@ +--- +_vm_host_: sk-2019vm + +install: + vm: + host: "{{ _vm_host_ }}" + mem: 4096 + numcpu: 2 + autostart: True + disks: + primary: /dev/sda + scsi: + sda: + type: zfs + name: root + size: 20g + properties: + refreservation: none + interfaces: + - bridge: br-public + name: primary0 + +network: + nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}" + domain: "{{ host_domain }}" + systemd_link: + interfaces: "{{ install.interfaces }}" + primary: + interface: primary0 + ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}" + mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}" + gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}" + overlay: "{{ (hostvars[_vm_host_].vm_host.network.bridges.public.overlay.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.overlay.offsets[inventory_hostname])).split('/')[0] }}" + +external_ip: "{{ network.primary.overlay }}" + +docker_lvm: + vg: "{{ host_name }}" + lv: docker + size: 7G + fs: ext4 + +kubelet_lvm: + vg: "{{ host_name }}" + lv: kubelet + size: 5G + fs: ext4 diff --git a/inventory/host_vars/ch-k8s-m1.yml b/inventory/host_vars/ch-k8s-m1.yml new file mode 
100644 index 00000000..30239ab3 --- /dev/null +++ b/inventory/host_vars/ch-k8s-m1.yml @@ -0,0 +1,47 @@ +--- +_vm_host_: sk-2019vm + +install: + vm: + host: "{{ _vm_host_ }}" + mem: 4096 + numcpu: 2 + autostart: True + disks: + primary: /dev/sda + scsi: + sda: + type: zfs + name: root + size: 20g + properties: + refreservation: none + interfaces: + - bridge: br-public + name: primary0 + +network: + nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}" + domain: "{{ host_domain }}" + systemd_link: + interfaces: "{{ install.interfaces }}" + primary: + interface: primary0 + ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}" + mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}" + gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}" + overlay: "{{ (hostvars[_vm_host_].vm_host.network.bridges.public.overlay.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.overlay.offsets[inventory_hostname])).split('/')[0] }}" + +external_ip: "{{ network.primary.overlay }}" + +docker_lvm: + vg: "{{ host_name }}" + lv: docker + size: 7G + fs: ext4 + +kubelet_lvm: + vg: "{{ host_name }}" + lv: kubelet + size: 5G + fs: ext4 diff --git a/inventory/host_vars/ch-k8s-m2.yml b/inventory/host_vars/ch-k8s-m2.yml new file mode 100644 index 00000000..a41c97a8 --- /dev/null +++ b/inventory/host_vars/ch-k8s-m2.yml @@ -0,0 +1,44 @@ +--- +_vm_host_: ch-hroottest + +install: + vm: + host: "{{ _vm_host_ }}" + mem: 4096 + numcpu: 2 + autostart: True + disks: + primary: /dev/sda + scsi: + sda: + type: zfs + name: root + size: 20g + properties: + refreservation: none + interfaces: + - bridge: br-public + name: primary0 + +network: + nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}" + domain: "{{ host_domain }}" + systemd_link: + interfaces: "{{ install.interfaces }}" + primary: + interface: primary0 + ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}" + mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}" + gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}" + +docker_lvm: + vg: "{{ host_name }}" + lv: docker + size: 7G + fs: ext4 + +kubelet_lvm: + vg: "{{ host_name }}" + lv: kubelet + size: 5G + fs: ext4 diff --git a/inventory/host_vars/ch-k8s-w0.yml b/inventory/host_vars/ch-k8s-w0.yml new file mode 100644 index 00000000..5cf6d444 --- /dev/null +++ b/inventory/host_vars/ch-k8s-w0.yml @@ -0,0 +1,44 @@ +--- +_vm_host_: ch-hroottest + +install: + vm: + host: "{{ _vm_host_ }}" + mem: 4096 + numcpu: 4 + autostart: True + disks: + primary: /dev/sda + scsi: + sda: + type: zfs + name: root + size: 100g + properties: + refreservation: none + interfaces: + - bridge: br-public + name: primary0 + +network: + nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}" + domain: "{{ host_domain }}" + systemd_link: + interfaces: "{{ install.interfaces }}" + primary: + interface: primary0 + ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}" + mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}" + gateway: "{{ 
hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}" + +docker_lvm: + vg: "{{ host_name }}" + lv: docker + size: 15G + fs: ext4 + +kubelet_lvm: + vg: "{{ host_name }}" + lv: kubelet + size: 15G + fs: ext4 diff --git a/inventory/host_vars/ch-k8s-w1.yml b/inventory/host_vars/ch-k8s-w1.yml new file mode 100644 index 00000000..5cf6d444 --- /dev/null +++ b/inventory/host_vars/ch-k8s-w1.yml @@ -0,0 +1,44 @@ +--- +_vm_host_: ch-hroottest + +install: + vm: + host: "{{ _vm_host_ }}" + mem: 4096 + numcpu: 4 + autostart: True + disks: + primary: /dev/sda + scsi: + sda: + type: zfs + name: root + size: 100g + properties: + refreservation: none + interfaces: + - bridge: br-public + name: primary0 + +network: + nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}" + domain: "{{ host_domain }}" + systemd_link: + interfaces: "{{ install.interfaces }}" + primary: + interface: primary0 + ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}" + mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}" + gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}" + +docker_lvm: + vg: "{{ host_name }}" + lv: docker + size: 15G + fs: ext4 + +kubelet_lvm: + vg: "{{ host_name }}" + lv: kubelet + size: 15G + fs: ext4 diff --git a/inventory/host_vars/sk-2019vm.yml b/inventory/host_vars/sk-2019vm.yml index 503995ee..de162712 100644 --- a/inventory/host_vars/sk-2019vm.yml +++ b/inventory/host_vars/sk-2019vm.yml @@ -62,6 +62,8 @@ vm_host: # emc-master: 137 lw-master: 137 ele-gwhetzner: 138 + ch-k8s-m0: 139 + ch-k8s-m1: 140 ch-mimas: 142 sk-testvm: 253 nat: yes @@ -72,6 +74,8 @@ vm_host: # emc-master: 1 lw-master: 1 ele-gwhetzner: 2 + ch-k8s-m0: 3 + ch-k8s-m1: 4 ch-mimas: 6 sk-testvm: 7 zfs: diff --git a/inventory/hosts.ini b/inventory/hosts.ini index ad231374..9add78d3 100644 --- a/inventory/hosts.ini +++ b/inventory/hosts.ini @@ -32,6 +32,7 @@ ch-hroottest-obsd host_name=hroot-test-obsd mz-chaos-at-home chaos-at-home-switches chaos-at-home-ap +chaos-at-home-k8s [mz-chaos-at-home] mz-router ansible_host=chmz-router @@ -53,6 +54,13 @@ ch-ap1 host_name=ap1 ch-router ch-pan +[chaos-at-home-k8s] +ch-k8s-m0 host_name=k8s-master0 +ch-k8s-m1 host_name=k8s-master1 +ch-k8s-m2 host_name=k8s-master2 +ch-k8s-w0 host_name=k8s-worker0 +ch-k8s-w1 host_name=k8s-worker1 + [realraum:vars] host_domain=realraum.at @@ -261,6 +269,8 @@ sk-tomnext-nc sk-tomnext-hp ch-hroottest-vm1 ch-hroottest-obsd +ch-k8s-m[0:2] +ch-k8s-w[0:1] [hroot] sk-2019 @@ -313,9 +323,18 @@ ele-dolmetsch-raspi +### Elevate Festival +[elevate-festival:children] +elevate +k8s-emc + + + +## Kubernetes [kubernetes-cluster:children] k8s-emc k8s-lwl +k8s-chtest [standalone-kubelet] sk-cloudia @@ -350,13 +369,6 @@ k8s-emc-distribution k8s-emc-streamer -### Elevate Festival -[elevate-festival:children] -elevate -k8s-emc - - - ### Kubernetes Cluster: lendwirbel-live [k8s-lwl-encoder] lw-dione @@ -376,3 +388,15 @@ k8s-lwl-master k8s-lwl-encoder k8s-lwl-distribution k8s-lwl-streamer + + +### Kubernetes Cluster: ch-test +[k8s-chtest-master] +ch-k8s-m[0:2] + +[k8s-chtest-worker] +ch-k8s-w[0:1] + +[k8s-chtest:children] +k8s-chtest-master +k8s-chtest-worker diff --git a/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 index 72b39c3f..0503ca03 100644 --- 
a/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 +++ b/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 @@ -6,7 +6,7 @@ After=kubeguard-interfaces.service {% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%} {% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%} -{% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%} +{% set direct_zone = kubeguard.direct_net_zones | default({}) | direct_net_zone(inventory_hostname, peer) -%} {% if direct_zone %} {% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %} {% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %} diff --git a/spreadspace/k8s-lwl.yml b/spreadspace/k8s-lwl.yml index 17d21601..902d833d 100644 --- a/spreadspace/k8s-lwl.yml +++ b/spreadspace/k8s-lwl.yml @@ -16,11 +16,13 @@ ### hack hack hack... - name: cook kubernetes secrets - hosts: _kubernetes_masters_ + hosts: _kubernetes_nodes_ gather_facts: no tasks: - set_fact: kubernetes_secrets_cooked: "{{ kubernetes_secrets }}" + - when: external_ip is defined + set_fact: external_ip_cooked: "{{ external_ip }}" - import_playbook: ../common/kubernetes-cluster.yml -- cgit v1.2.3 From 4e0c58293c0653a28f1f5683424540feb397c9ce Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Thu, 18 Jun 2020 21:59:08 +0200 Subject: kubernetes/net/kubegard/reset: fix missing handler --- roles/kubernetes/net/kubeguard/reset/handlers/main.yml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 roles/kubernetes/net/kubeguard/reset/handlers/main.yml (limited to 'roles/kubernetes') diff --git a/roles/kubernetes/net/kubeguard/reset/handlers/main.yml b/roles/kubernetes/net/kubeguard/reset/handlers/main.yml new file mode 100644 index 00000000..bb7fde2b --- /dev/null +++ b/roles/kubernetes/net/kubeguard/reset/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: reload systemd + systemd: + daemon_reload: yes -- cgit v1.2.3 From 11bedef59696c991c4663d61d8338ea8f0a04e91 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Thu, 18 Jun 2020 22:57:23 +0200 Subject: kubernetes: fix kubeguard cleanup --- common/kubernetes-cluster-cleanup.yml | 6 +++++ .../kubeadm/master/tasks/secondary-masters.yml | 2 +- .../net/kubeguard/cleanup/tasks/main.yml | 14 ++++++++++ roles/kubernetes/net/kubeguard/node/tasks/main.yml | 31 ++-------------------- 4 files changed, 23 insertions(+), 30 deletions(-) create mode 100644 roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml (limited to 'roles/kubernetes') diff --git a/common/kubernetes-cluster-cleanup.yml b/common/kubernetes-cluster-cleanup.yml index 83d6945c..d56940ee 100644 --- a/common/kubernetes-cluster-cleanup.yml +++ b/common/kubernetes-cluster-cleanup.yml @@ -28,6 +28,12 @@ loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}" command: "kubectl delete node {{ item }}" +- name: cleanup kubeguard connections + hosts: _kubernetes_nodes_ + roles: + - role: kubernetes/net/kubeguard/cleanup + when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard' + - name: try to clean superflous nodes hosts: _kubernetes_nodes_remove_ roles: diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml 
b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml index 37f108a7..610a8d3f 100644 --- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml @@ -28,7 +28,7 @@ - name: join kubernetes secondary master node and store log block: - name: join kubernetes secondary master node - throttle: 1 ## TODO test this! + throttle: 1 command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" args: creates: /etc/kubernetes/kubelet.conf diff --git a/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml b/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml new file mode 100644 index 00000000..f15058d2 --- /dev/null +++ b/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: stop/disable systemd units for stale kubeguard peers + loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}" + systemd: + name: "kubeguard-peer-{{ item }}.service" + state: stopped + enabled: no + failed_when: false + +- name: remove systemd units for stale kubeguard peers + loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}" + file: + name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service" + state: absent diff --git a/roles/kubernetes/net/kubeguard/node/tasks/main.yml b/roles/kubernetes/net/kubeguard/node/tasks/main.yml index 0658b42c..72814e06 100644 --- a/roles/kubernetes/net/kubeguard/node/tasks/main.yml +++ b/roles/kubernetes/net/kubeguard/node/tasks/main.yml @@ -44,35 +44,8 @@ state: started enabled: yes -- name: get list of currently installed kubeguard peers - find: - path: /etc/systemd/system/ - pattern: "kubeguard-peer-*.service" - register: kubeguard_peers_installed - -- name: compute list of peers to be added - set_fact: - kubeguard_peers_to_add: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}" - -- name: compute list of peers to be removed - set_fact: - kubeguard_peers_to_remove: "{{ kubeguard_peers_installed.files | map(attribute='path') | map('replace', '/etc/systemd/system/kubeguard-peer-', '') | map('replace', '.service', '') | difference(kubeguard_peers_to_add) }}" - -- name: stop/disable systemd units for stale kubeguard peers - loop: "{{ kubeguard_peers_to_remove }}" - systemd: - name: "kubeguard-peer-{{ item }}.service" - state: stopped - enabled: no - -- name: remove systemd units for stale kubeguard peers - loop: "{{ kubeguard_peers_to_remove }}" - file: - name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service" - state: absent - - name: install systemd units for every kubeguard peer - loop: "{{ kubeguard_peers_to_add }}" + loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}" loop_control: loop_var: peer template: @@ -81,7 +54,7 @@ # TODO: notify restart for peers that change... 
- name: make sure kubeguard peer services are started and enabled - loop: "{{ kubeguard_peers_to_add }}" + loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}" systemd: daemon_reload: yes name: "kubeguard-peer-{{ item }}.service" -- cgit v1.2.3 From 82139978e2ce9e72de13d35a7f038bb735d570a8 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 19 Jun 2020 20:47:05 +0200 Subject: hatop is dead, switch to haproxyctl --- roles/kubernetes/kubeadm/base/tasks/main.yml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'roles/kubernetes') diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml index 7f2e02c2..7d882f31 100644 --- a/roles/kubernetes/kubeadm/base/tasks/main.yml +++ b/roles/kubernetes/kubeadm/base/tasks/main.yml @@ -3,7 +3,7 @@ apt: name: - haproxy - - hatop + - haproxyctl - "kubeadm={{ kubernetes_version }}-00" - "kubectl={{ kubernetes_version }}-00" state: present @@ -48,17 +48,6 @@ state: "{% if haproxy_config is changed %}restarted{% else %}started{% endif %}" enabled: yes -- name: add hatop config for shells - loop: - - zsh - - bash - blockinfile: - path: "/root/.{{ item }}rc" - create: yes - marker: "### {mark} ANSIBLE MANAGED BLOCK for hatop ###" - content: | - alias hatop="hatop -s /var/run/haproxy/admin.sock" - ## loading the modules temporarly because kubeadm will complain if they are not there # but i don't think it is necessary to make this persistent, also ignoring changes here - name: load module br_netfilter to satisfy kubeadm init/join -- cgit v1.2.3 From 2ecd6ff7a3390d86f40f062b177fe9babd676f22 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 20 Jun 2020 01:53:46 +0200 Subject: kubernetes: move kubeguard to kubeadm/base --- common/kubernetes-cluster.yml | 2 - .../kubeadm/base/filter_plugins/net_kubeguard.py | 33 +++++++++ .../kubeadm/base/tasks/net_kubeguard.yml | 77 +++++++++++++++++++++ .../base/templates/net_kubeguard/ifupdown.sh.j2 | 55 +++++++++++++++ .../base/templates/net_kubeguard/k8s.json.j2 | 12 ++++ .../net_kubeguard/kubeguard-interfaces.service.j2 | 12 ++++ .../net_kubeguard/kubeguard-peer.service.j2 | 37 ++++++++++ .../node/files/kubeguard-interfaces.service | 12 ---- .../net/kubeguard/node/filter_plugins/kubeguard.py | 33 --------- .../net/kubeguard/node/handlers/main.yml | 4 -- roles/kubernetes/net/kubeguard/node/tasks/main.yml | 80 ---------------------- .../net/kubeguard/node/templates/ifupdown.sh.j2 | 55 --------------- .../net/kubeguard/node/templates/k8s.json.j2 | 12 ---- .../node/templates/kubeguard-peer.service.j2 | 37 ---------- 14 files changed, 226 insertions(+), 235 deletions(-) create mode 100644 roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/k8s.json.j2 create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-interfaces.service.j2 create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-peer.service.j2 delete mode 100644 roles/kubernetes/net/kubeguard/node/files/kubeguard-interfaces.service delete mode 100644 roles/kubernetes/net/kubeguard/node/filter_plugins/kubeguard.py delete mode 100644 roles/kubernetes/net/kubeguard/node/handlers/main.yml delete mode 100644 roles/kubernetes/net/kubeguard/node/tasks/main.yml delete mode 100644 roles/kubernetes/net/kubeguard/node/templates/ifupdown.sh.j2 delete 
mode 100644 roles/kubernetes/net/kubeguard/node/templates/k8s.json.j2 delete mode 100644 roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 (limited to 'roles/kubernetes') diff --git a/common/kubernetes-cluster.yml b/common/kubernetes-cluster.yml index 459fd664..fe26d90d 100644 --- a/common/kubernetes-cluster.yml +++ b/common/kubernetes-cluster.yml @@ -36,8 +36,6 @@ - name: kubernetes base installation hosts: _kubernetes_nodes_ roles: - - role: kubernetes/net/kubeguard/node - when: kubernetes_network_plugin == 'kubeguard' - role: kubernetes/base - role: kubernetes/kubeadm/base diff --git a/roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py b/roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py new file mode 100644 index 00000000..2220e545 --- /dev/null +++ b/roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py @@ -0,0 +1,33 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible import errors + + +def kubeguard_direct_net_zone(data, myname, peer): + try: + zones = [] + for zone in data: + if myname in data[zone]['node_interface'] and peer in data[zone]['node_interface']: + zones.append(zone) + + if not zones: + return "" + if len(zones) > 1: + raise errors.AnsibleFilterError("host '%s' and '%s' have multiple direct net zones in common: %s" % + (myname, peer, zones.join(','))) + return zones[0] + + except Exception as e: + raise errors.AnsibleFilterError("kubeguard_direct_net_zones(): %s" % str(e)) + + +class FilterModule(object): + + ''' Kubeguard Network Filters ''' + filter_map = { + 'kubeguard_direct_net_zone': kubeguard_direct_net_zone, + } + + def filters(self): + return self.filter_map diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml index 0924c458..8c5f5065 100644 --- a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml @@ -5,3 +5,80 @@ msg: "this network plugin can not replace kube-proxy please set kubernetes_network_plugin_replaces_kube_proxy to false." that: - not kubernetes_network_plugin_replaces_kube_proxy + + +- name: install wireguard + import_role: + name: wireguard/base + +- name: create network config directory + file: + name: /var/lib/kubeguard/ + state: directory + +- name: install ifupdown script + template: + src: net_kubeguard/ifupdown.sh.j2 + dest: /var/lib/kubeguard/ifupdown.sh + mode: 0755 + # TODO: notify reload... this is unfortunately already to late because + # it must probably be brought down by the old version of the script + +- name: generate wireguard private key + shell: "umask 077; wg genkey > /var/lib/kubeguard/kube-wg0.privatekey" + args: + creates: /var/lib/kubeguard/kube-wg0.privatekey + +- name: fetch wireguard public key + shell: "wg pubkey < /var/lib/kubeguard/kube-wg0.privatekey" + register: kubeguard_wireguard_pubkey + changed_when: false + check_mode: no + +- name: install systemd service unit for network interfaces + template: + src: net_kubeguard/kubeguard-interfaces.service.j2 + dest: /etc/systemd/system/kubeguard-interfaces.service + # TODO: notify: reload??? 
+ +- name: make sure kubeguard interfaces service is started and enabled + systemd: + daemon_reload: yes + name: kubeguard-interfaces.service + state: started + enabled: yes + +- name: install systemd units for every kubeguard peer + loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}" + loop_control: + loop_var: peer + template: + src: net_kubeguard/kubeguard-peer.service.j2 + dest: "/etc/systemd/system/kubeguard-peer-{{ peer }}.service" + # TODO: notify restart for peers that change... + +- name: make sure kubeguard peer services are started and enabled + loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}" + systemd: + daemon_reload: yes + name: "kubeguard-peer-{{ item }}.service" + state: started + enabled: yes + +- name: enable IPv4 forwarding + sysctl: + name: net.ipv4.ip_forward + value: '1' + sysctl_set: yes + state: present + reload: yes + +- name: create cni config directory + file: + name: /etc/cni/net.d + state: directory + +- name: install cni config + template: + src: net_kubeguard/k8s.json.j2 + dest: /etc/cni/net.d/kubeguard.json diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 new file mode 100644 index 00000000..d8153102 --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 @@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +CONF_D="/var/lib/kubeguard/" + +INET_IF="{{ ansible_default_ipv4.interface }}" + +POD_NET_CIDR="{{ kubernetes.pod_ip_range }}" + +{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%} +BR_IF="kube-br0" +BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}" +BR_IP_CIDR="{{ br_net | ipaddr(1) }}" +BR_NET_CIDR="{{ br_net }}" + +TUN_IF="kube-wg0" +TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}" + + +case "$1" in + up) + # bring up bridge for local pods + ip link add dev "$BR_IF" type bridge + ip addr add dev "$BR_IF" "$BR_IP_CIDR" + ip link set up dev "$BR_IF" + iptables -t nat -A POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE + modprobe br_netfilter + + # bring up wireguard tunnel to other nodes + ip link add dev "$TUN_IF" type wireguard + ip addr add dev "$TUN_IF" "$TUN_IP_CIDR" + wg set "$TUN_IF" listen-port {{ kubeguard_wireguard_port | default(51820) }} private-key "$CONF_D/$TUN_IF.privatekey" + ip link set up dev "$TUN_IF" + + # make pods and service IPs reachable + # !!! use IP of bridge as source so we don't produce martians if direct-zones are involved!!! 
+ ip route add "$POD_NET_CIDR" dev "$TUN_IF" src "$BR_IP" + ;; + down) + # bring down wireguard tunnel to other nodes + ip route del "$POD_NET_CIDR" dev "$TUN_IF" + ip link del dev "$TUN_IF" + + # bring down bridge for local pods + iptables -t nat -D POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE + ip link del dev "$BR_IF" + ;; + *) + echo "usage: $0 (up|down)" + exit 1 + ;; +esac + +exit 0 diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/k8s.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/k8s.json.j2 new file mode 100644 index 00000000..65b1357a --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/k8s.json.j2 @@ -0,0 +1,12 @@ +{ + "cniVersion": "0.3.1", + "name": "k8s", + "type": "bridge", + "bridge": "kube-br0", + "isDefaultGateway": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}" + } +} diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-interfaces.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-interfaces.service.j2 new file mode 100644 index 00000000..35fc8f90 --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-interfaces.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Kubeguard Network Setup +After=network.target + +[Service] +Type=oneshot +ExecStart=/var/lib/kubeguard/ifupdown.sh up +ExecStop=/var/lib/kubeguard/ifupdown.sh down +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-peer.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-peer.service.j2 new file mode 100644 index 00000000..92300253 --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-peer.service.j2 @@ -0,0 +1,37 @@ +[Unit] +Description=Kubernetes Network Peer {{ peer }} +After=network.target +Requires=kubeguard-interfaces.service +After=kubeguard-interfaces.service + +{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%} +{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%} +{% set direct_zone = kubeguard.direct_net_zones | default({}) | kubeguard_direct_net_zone(inventory_hostname, peer) -%} +{% if direct_zone %} +{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %} +{% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %} +{% set direct_ip_peer = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %} +{% else %} +{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%} +{% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%} +{% set wg_host = hostvars[peer].external_ip_cooked | default(hostvars[peer].ansible_default_ipv4.address) -%} +{% set wg_port = hostvars[peer].kubeguard_wireguard_port | default(51820) -%} +{% set wg_allowedips = (tun_ip | ipaddr('address')) + "/32," + pod_net_peer %} +{% endif %} +[Service] +Type=oneshot +{% if direct_zone %} +ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }} +ExecStart=/sbin/ip link set up dev {{ direct_interface 
}} +ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }} +ExecStop=/sbin/ip route del {{ pod_net_peer }} +ExecStop=/sbin/ip link set down dev {{ direct_interface }} +ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }} +{% else %} +ExecStart=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10 +ExecStop=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} remove +{% endif %} +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/roles/kubernetes/net/kubeguard/node/files/kubeguard-interfaces.service b/roles/kubernetes/net/kubeguard/node/files/kubeguard-interfaces.service deleted file mode 100644 index 35fc8f90..00000000 --- a/roles/kubernetes/net/kubeguard/node/files/kubeguard-interfaces.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Kubeguard Network Setup -After=network.target - -[Service] -Type=oneshot -ExecStart=/var/lib/kubeguard/ifupdown.sh up -ExecStop=/var/lib/kubeguard/ifupdown.sh down -RemainAfterExit=yes - -[Install] -WantedBy=multi-user.target diff --git a/roles/kubernetes/net/kubeguard/node/filter_plugins/kubeguard.py b/roles/kubernetes/net/kubeguard/node/filter_plugins/kubeguard.py deleted file mode 100644 index 199ff14b..00000000 --- a/roles/kubernetes/net/kubeguard/node/filter_plugins/kubeguard.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible import errors - - -def direct_net_zone(data, myname, peer): - try: - zones = [] - for zone in data: - if myname in data[zone]['node_interface'] and peer in data[zone]['node_interface']: - zones.append(zone) - - if not zones: - return "" - if len(zones) > 1: - raise errors.AnsibleFilterError("host '%s' and '%s' have multiple direct net zones in common: %s" % - (myname, peer, zones.join(','))) - return zones[0] - - except Exception as e: - raise errors.AnsibleFilterError("direct_net_zones(): %s" % str(e)) - - -class FilterModule(object): - - ''' Kubeguard Network Filters ''' - filter_map = { - 'direct_net_zone': direct_net_zone, - } - - def filters(self): - return self.filter_map diff --git a/roles/kubernetes/net/kubeguard/node/handlers/main.yml b/roles/kubernetes/net/kubeguard/node/handlers/main.yml deleted file mode 100644 index bb7fde2b..00000000 --- a/roles/kubernetes/net/kubeguard/node/handlers/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- name: reload systemd - systemd: - daemon_reload: yes diff --git a/roles/kubernetes/net/kubeguard/node/tasks/main.yml b/roles/kubernetes/net/kubeguard/node/tasks/main.yml deleted file mode 100644 index 72814e06..00000000 --- a/roles/kubernetes/net/kubeguard/node/tasks/main.yml +++ /dev/null @@ -1,80 +0,0 @@ ---- -- name: install wireguard - import_role: - name: wireguard/base - -- name: create network config directory - file: - name: /var/lib/kubeguard/ - state: directory - -- name: configure wireguard port - set_fact: - kubeguard_wireguard_port: "{{ kubernetes.wireguard_port | default(51820) }}" - -- name: install ifupdown script - template: - src: ifupdown.sh.j2 - dest: /var/lib/kubeguard/ifupdown.sh - mode: 0755 - # TODO: notify reload... 
this is unfortunately already to late because - # it must probably be brought down by the old version of the script - -- name: generate wireguard private key - shell: "umask 077; wg genkey > /var/lib/kubeguard/kube-wg0.privatekey" - args: - creates: /var/lib/kubeguard/kube-wg0.privatekey - -- name: fetch wireguard public key - shell: "wg pubkey < /var/lib/kubeguard/kube-wg0.privatekey" - register: kubeguard_wireguard_pubkey - changed_when: false - check_mode: no - -- name: install systemd service unit for network interfaces - copy: - src: kubeguard-interfaces.service - dest: /etc/systemd/system/kubeguard-interfaces.service - # TODO: notify: reload??? - -- name: make sure kubeguard interfaces service is started and enabled - systemd: - daemon_reload: yes - name: kubeguard-interfaces.service - state: started - enabled: yes - -- name: install systemd units for every kubeguard peer - loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}" - loop_control: - loop_var: peer - template: - src: kubeguard-peer.service.j2 - dest: "/etc/systemd/system/kubeguard-peer-{{ peer }}.service" - # TODO: notify restart for peers that change... - -- name: make sure kubeguard peer services are started and enabled - loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}" - systemd: - daemon_reload: yes - name: "kubeguard-peer-{{ item }}.service" - state: started - enabled: yes - -- name: enable IPv4 forwarding - sysctl: - name: net.ipv4.ip_forward - value: '1' - sysctl_set: yes - state: present - reload: yes - -- name: create cni config directory - file: - name: /etc/cni/net.d - state: directory - -- name: install cni config - template: - src: k8s.json.j2 - dest: /etc/cni/net.d/k8s.json diff --git a/roles/kubernetes/net/kubeguard/node/templates/ifupdown.sh.j2 b/roles/kubernetes/net/kubeguard/node/templates/ifupdown.sh.j2 deleted file mode 100644 index 98b38cf4..00000000 --- a/roles/kubernetes/net/kubeguard/node/templates/ifupdown.sh.j2 +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -set -e - -CONF_D="/var/lib/kubeguard/" - -INET_IF="{{ ansible_default_ipv4.interface }}" - -POD_NET_CIDR="{{ kubernetes.pod_ip_range }}" - -{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%} -BR_IF="kube-br0" -BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}" -BR_IP_CIDR="{{ br_net | ipaddr(1) }}" -BR_NET_CIDR="{{ br_net }}" - -TUN_IF="kube-wg0" -TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}" - - -case "$1" in - up) - # bring up bridge for local pods - ip link add dev "$BR_IF" type bridge - ip addr add dev "$BR_IF" "$BR_IP_CIDR" - ip link set up dev "$BR_IF" - iptables -t nat -A POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE - modprobe br_netfilter - - # bring up wireguard tunnel to other nodes - ip link add dev "$TUN_IF" type wireguard - ip addr add dev "$TUN_IF" "$TUN_IP_CIDR" - wg set "$TUN_IF" listen-port {{ kubeguard_wireguard_port }} private-key "$CONF_D/$TUN_IF.privatekey" - ip link set up dev "$TUN_IF" - - # make pods and service IPs reachable - # !!! use IP of bridge as source so we don't produce martians if direct-zones are involved!!! 
- ip route add "$POD_NET_CIDR" dev "$TUN_IF" src "$BR_IP" - ;; - down) - # bring down wireguard tunnel to other nodes - ip route del "$POD_NET_CIDR" dev "$TUN_IF" - ip link del dev "$TUN_IF" - - # bring down bridge for local pods - iptables -t nat -D POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE - ip link del dev "$BR_IF" - ;; - *) - echo "usage: $0 (up|down)" - exit 1 - ;; -esac - -exit 0 diff --git a/roles/kubernetes/net/kubeguard/node/templates/k8s.json.j2 b/roles/kubernetes/net/kubeguard/node/templates/k8s.json.j2 deleted file mode 100644 index 65b1357a..00000000 --- a/roles/kubernetes/net/kubeguard/node/templates/k8s.json.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cniVersion": "0.3.1", - "name": "k8s", - "type": "bridge", - "bridge": "kube-br0", - "isDefaultGateway": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}" - } -} diff --git a/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 deleted file mode 100644 index 0503ca03..00000000 --- a/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 +++ /dev/null @@ -1,37 +0,0 @@ -[Unit] -Description=Kubernetes Network Peer {{ peer }} -After=network.target -Requires=kubeguard-interfaces.service -After=kubeguard-interfaces.service - -{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%} -{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%} -{% set direct_zone = kubeguard.direct_net_zones | default({}) | direct_net_zone(inventory_hostname, peer) -%} -{% if direct_zone %} -{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %} -{% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %} -{% set direct_ip_peer = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %} -{% else %} -{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%} -{% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%} -{% set wg_host = hostvars[peer].external_ip_cooked | default(hostvars[peer].ansible_default_ipv4.address) -%} -{% set wg_port = hostvars[peer].kubeguard_wireguard_port -%} -{% set wg_allowedips = (tun_ip | ipaddr('address')) + "/32," + pod_net_peer %} -{% endif %} -[Service] -Type=oneshot -{% if direct_zone %} -ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }} -ExecStart=/sbin/ip link set up dev {{ direct_interface }} -ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }} -ExecStop=/sbin/ip route del {{ pod_net_peer }} -ExecStop=/sbin/ip link set down dev {{ direct_interface }} -ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }} -{% else %} -ExecStart=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10 -ExecStop=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} remove -{% endif %} -RemainAfterExit=yes - -[Install] -WantedBy=multi-user.target -- cgit v1.2.3 From ac06da81fbf302c12b235ddde0a5fae93bba20f4 Mon Sep 17 
00:00:00 2001 From: Christian Pointner Date: Sat, 20 Jun 2020 03:35:08 +0200 Subject: kubernetes: move kubeguard/reset to kubeadm/reset --- common/kubernetes-cluster-cleanup.yml | 2 -- roles/kubernetes/kubeadm/reset/handlers/main.yml | 4 ++++ roles/kubernetes/kubeadm/reset/tasks/main.yml | 3 +++ .../kubeadm/reset/tasks/net_kubeguard.yml | 26 ++++++++++++++++++++++ .../net/kubeguard/reset/handlers/main.yml | 4 ---- .../kubernetes/net/kubeguard/reset/tasks/main.yml | 26 ---------------------- 6 files changed, 33 insertions(+), 32 deletions(-) create mode 100644 roles/kubernetes/kubeadm/reset/handlers/main.yml create mode 100644 roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml delete mode 100644 roles/kubernetes/net/kubeguard/reset/handlers/main.yml delete mode 100644 roles/kubernetes/net/kubeguard/reset/tasks/main.yml (limited to 'roles/kubernetes') diff --git a/common/kubernetes-cluster-cleanup.yml b/common/kubernetes-cluster-cleanup.yml index d56940ee..7c10d17c 100644 --- a/common/kubernetes-cluster-cleanup.yml +++ b/common/kubernetes-cluster-cleanup.yml @@ -38,5 +38,3 @@ hosts: _kubernetes_nodes_remove_ roles: - role: kubernetes/kubeadm/reset - - role: kubernetes/net/kubeguard/reset - when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard' diff --git a/roles/kubernetes/kubeadm/reset/handlers/main.yml b/roles/kubernetes/kubeadm/reset/handlers/main.yml new file mode 100644 index 00000000..bb7fde2b --- /dev/null +++ b/roles/kubernetes/kubeadm/reset/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: reload systemd + systemd: + daemon_reload: yes diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml index 57a3faf4..cf9c125d 100644 --- a/roles/kubernetes/kubeadm/reset/tasks/main.yml +++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml @@ -30,3 +30,6 @@ file: path: "{{ item.path }}" state: absent + +- name: extra-cleanup for kubeguard network plugin + import_tasks: net_kubeguard.yml diff --git a/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml new file mode 100644 index 00000000..03b3f205 --- /dev/null +++ b/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml @@ -0,0 +1,26 @@ +--- +- name: check if kubeguard interface service unit exists + stat: + path: /etc/systemd/system/kubeguard-interfaces.service + register: kubeguard_interface_unit + +- name: bring down kubeguard interface + when: kubeguard_interface_unit.stat.exists + systemd: + name: kubeguard-interfaces.service + state: stopped + +- name: gather list of all kubeguard related service units + find: + path: /etc/systemd/system/ + patterns: + - "kubeguard-peer-*.service" + - kubeguard-interfaces.service + register: kubeguard_units_installed + +- name: remove all kubeguard related files and directories + loop: "{{ kubeguard_units_installed.files | map(attribute='path') | list | flatten | union(['/var/lib/kubeguard']) }}" + file: + path: "{{ item }}" + state: absent + notify: reload systemd diff --git a/roles/kubernetes/net/kubeguard/reset/handlers/main.yml b/roles/kubernetes/net/kubeguard/reset/handlers/main.yml deleted file mode 100644 index bb7fde2b..00000000 --- a/roles/kubernetes/net/kubeguard/reset/handlers/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- name: reload systemd - systemd: - daemon_reload: yes diff --git a/roles/kubernetes/net/kubeguard/reset/tasks/main.yml b/roles/kubernetes/net/kubeguard/reset/tasks/main.yml deleted file mode 100644 index d24f9eff..00000000 --- 
a/roles/kubernetes/net/kubeguard/reset/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: check if kubeguard interface service unit exists - stat: - path: /etc/systemd/system/kubeguard-interfaces.service - register: kubeguard_interface_unit - -- name: bring down kubeguard interface - systemd: - name: kubeguard-interfaces.service - state: stopped - when: kubeguard_interface_unit.stat.exists - -- name: gather list of all kubeguard related service units - find: - path: /etc/systemd/system/ - patterns: - - "kubeguard-peer-*.service" - - kubeguard-interfaces.service - register: kubeguard_units_installed - -- name: remove all kubeguard related files and directories - loop: "{{ kubeguard_units_installed.files | map(attribute='path') | list | flatten | union(['/var/lib/kubeguard']) }}" - file: - path: "{{ item }}" - state: absent - notify: reload systemd -- cgit v1.2.3 From b39c3b91269a8482207863234acc298f623deae6 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 20 Jun 2020 05:20:46 +0200 Subject: kubernetes: add node pruning role --- common/kubernetes-cluster-cleanup.yml | 21 ++++-------- .../kubeadm/base/tasks/net_kubeguard.yml | 20 ++++++------ .../base/templates/net_kubeguard/cni.json.j2 | 12 +++++++ .../base/templates/net_kubeguard/ifupdown.sh.j2 | 4 +-- .../templates/net_kubeguard/interface.service.j2 | 12 +++++++ .../base/templates/net_kubeguard/k8s.json.j2 | 12 ------- .../net_kubeguard/kubeguard-interfaces.service.j2 | 12 ------- .../net_kubeguard/kubeguard-peer.service.j2 | 37 ---------------------- .../base/templates/net_kubeguard/peer.service.j2 | 37 ++++++++++++++++++++++ roles/kubernetes/kubeadm/prune/tasks/main.yml | 9 ++++++ .../kubeadm/prune/tasks/net_kubeguard.yml | 14 ++++++++ roles/kubernetes/kubeadm/prune/tasks/net_none.yml | 2 ++ .../kubeadm/reset/tasks/net_kubeguard.yml | 6 ++-- .../net/kubeguard/cleanup/tasks/main.yml | 14 -------- 14 files changed, 107 insertions(+), 105 deletions(-) create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2 delete mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/k8s.json.j2 delete mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-interfaces.service.j2 delete mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-peer.service.j2 create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2 create mode 100644 roles/kubernetes/kubeadm/prune/tasks/main.yml create mode 100644 roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml create mode 100644 roles/kubernetes/kubeadm/prune/tasks/net_none.yml delete mode 100644 roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml (limited to 'roles/kubernetes') diff --git a/common/kubernetes-cluster-cleanup.yml b/common/kubernetes-cluster-cleanup.yml index 7c10d17c..5647e3d6 100644 --- a/common/kubernetes-cluster-cleanup.yml +++ b/common/kubernetes-cluster-cleanup.yml @@ -13,28 +13,19 @@ add_host: name: "{{ item }}" inventory_dir: "{{ hostvars[item].inventory_dir }}" - group: _kubernetes_nodes_remove_ + group: _kubernetes_nodes_prune_ changed_when: False - name: drain superflous nodes - loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}" + loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}" command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets" - -- name: remove nodes from api server - hosts: _kubernetes_primary_master_ - 
tasks: - - name: remove superflous nodes - loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}" - command: "kubectl delete node {{ item }}" - -- name: cleanup kubeguard connections +- name: prune superfluous nodes from cluster hosts: _kubernetes_nodes_ roles: - - role: kubernetes/net/kubeguard/cleanup - when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard' + - role: kubernetes/kubeadm/prune -- name: try to clean superflous nodes - hosts: _kubernetes_nodes_remove_ +- name: wipe superfluous nodes + hosts: _kubernetes_nodes_prune_ roles: - role: kubernetes/kubeadm/reset diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml index 8c5f5065..37b5030d 100644 --- a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml @@ -25,26 +25,26 @@ # it must probably be brought down by the old version of the script - name: generate wireguard private key - shell: "umask 077; wg genkey > /var/lib/kubeguard/kube-wg0.privatekey" + shell: "umask 077; wg genkey > /var/lib/kubeguard/kubeguard-wg0.privatekey" args: - creates: /var/lib/kubeguard/kube-wg0.privatekey + creates: /var/lib/kubeguard/kubeguard-wg0.privatekey - name: fetch wireguard public key - shell: "wg pubkey < /var/lib/kubeguard/kube-wg0.privatekey" + shell: "wg pubkey < /var/lib/kubeguard/kubeguard-wg0.privatekey" register: kubeguard_wireguard_pubkey changed_when: false check_mode: no -- name: install systemd service unit for network interfaces +- name: install systemd service unit for network interface template: - src: net_kubeguard/kubeguard-interfaces.service.j2 - dest: /etc/systemd/system/kubeguard-interfaces.service + src: net_kubeguard/interface.service.j2 + dest: /etc/systemd/system/kubeguard-interface.service # TODO: notify: reload??? -- name: make sure kubeguard interfaces service is started and enabled +- name: make sure kubeguard interface service is started and enabled systemd: daemon_reload: yes - name: kubeguard-interfaces.service + name: kubeguard-interface.service state: started enabled: yes @@ -53,7 +53,7 @@ loop_control: loop_var: peer template: - src: net_kubeguard/kubeguard-peer.service.j2 + src: net_kubeguard/peer.service.j2 dest: "/etc/systemd/system/kubeguard-peer-{{ peer }}.service" # TODO: notify restart for peers that change...
@@ -80,5 +80,5 @@ - name: install cni config template: - src: net_kubeguard/k8s.json.j2 + src: net_kubeguard/cni.json.j2 dest: /etc/cni/net.d/kubeguard.json diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 new file mode 100644 index 00000000..eb9e3d61 --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 @@ -0,0 +1,12 @@ +{ + "cniVersion": "0.3.1", + "name": "kubeguard", + "type": "bridge", + "bridge": "kubeguard-br0", + "isDefaultGateway": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}" + } +} diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 index d8153102..f940d413 100644 --- a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 @@ -9,12 +9,12 @@ INET_IF="{{ ansible_default_ipv4.interface }}" POD_NET_CIDR="{{ kubernetes.pod_ip_range }}" {% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%} -BR_IF="kube-br0" +BR_IF="kubeguard-br0" BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}" BR_IP_CIDR="{{ br_net | ipaddr(1) }}" BR_NET_CIDR="{{ br_net }}" -TUN_IF="kube-wg0" +TUN_IF="kubeguard-wg0" TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}" diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2 new file mode 100644 index 00000000..35fc8f90 --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Kubeguard Network Setup +After=network.target + +[Service] +Type=oneshot +ExecStart=/var/lib/kubeguard/ifupdown.sh up +ExecStop=/var/lib/kubeguard/ifupdown.sh down +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/k8s.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/k8s.json.j2 deleted file mode 100644 index 65b1357a..00000000 --- a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/k8s.json.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cniVersion": "0.3.1", - "name": "k8s", - "type": "bridge", - "bridge": "kube-br0", - "isDefaultGateway": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}" - } -} diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-interfaces.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-interfaces.service.j2 deleted file mode 100644 index 35fc8f90..00000000 --- a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-interfaces.service.j2 +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Kubeguard Network Setup -After=network.target - -[Service] -Type=oneshot -ExecStart=/var/lib/kubeguard/ifupdown.sh up -ExecStop=/var/lib/kubeguard/ifupdown.sh down -RemainAfterExit=yes - -[Install] -WantedBy=multi-user.target diff --git 
a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-peer.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-peer.service.j2 deleted file mode 100644 index 92300253..00000000 --- a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/kubeguard-peer.service.j2 +++ /dev/null @@ -1,37 +0,0 @@ -[Unit] -Description=Kubernetes Network Peer {{ peer }} -After=network.target -Requires=kubeguard-interfaces.service -After=kubeguard-interfaces.service - -{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%} -{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%} -{% set direct_zone = kubeguard.direct_net_zones | default({}) | kubeguard_direct_net_zone(inventory_hostname, peer) -%} -{% if direct_zone %} -{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %} -{% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %} -{% set direct_ip_peer = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %} -{% else %} -{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%} -{% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%} -{% set wg_host = hostvars[peer].external_ip_cooked | default(hostvars[peer].ansible_default_ipv4.address) -%} -{% set wg_port = hostvars[peer].kubeguard_wireguard_port | default(51820) -%} -{% set wg_allowedips = (tun_ip | ipaddr('address')) + "/32," + pod_net_peer %} -{% endif %} -[Service] -Type=oneshot -{% if direct_zone %} -ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }} -ExecStart=/sbin/ip link set up dev {{ direct_interface }} -ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }} -ExecStop=/sbin/ip route del {{ pod_net_peer }} -ExecStop=/sbin/ip link set down dev {{ direct_interface }} -ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }} -{% else %} -ExecStart=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10 -ExecStop=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} remove -{% endif %} -RemainAfterExit=yes - -[Install] -WantedBy=multi-user.target diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2 new file mode 100644 index 00000000..c9d96a5a --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2 @@ -0,0 +1,37 @@ +[Unit] +Description=Kubernetes Network Peer {{ peer }} +After=network.target +Requires=kubeguard-interface.service +After=kubeguard-interface.service + +{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%} +{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%} +{% set direct_zone = kubeguard.direct_net_zones | default({}) | kubeguard_direct_net_zone(inventory_hostname, peer) -%} +{% if direct_zone %} +{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %} +{% set 
direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %} +{% set direct_ip_peer = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %} +{% else %} +{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%} +{% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%} +{% set wg_host = hostvars[peer].external_ip_cooked | default(hostvars[peer].ansible_default_ipv4.address) -%} +{% set wg_port = hostvars[peer].kubeguard_wireguard_port | default(51820) -%} +{% set wg_allowedips = (tun_ip | ipaddr('address')) + "/32," + pod_net_peer %} +{% endif %} +[Service] +Type=oneshot +{% if direct_zone %} +ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }} +ExecStart=/sbin/ip link set up dev {{ direct_interface }} +ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }} +ExecStop=/sbin/ip route del {{ pod_net_peer }} +ExecStop=/sbin/ip link set down dev {{ direct_interface }} +ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }} +{% else %} +ExecStart=/usr/bin/wg set kubeguard-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10 +ExecStop=/usr/bin/wg set kubeguard-wg0 peer {{ wg_pubkey }} remove +{% endif %} +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/roles/kubernetes/kubeadm/prune/tasks/main.yml b/roles/kubernetes/kubeadm/prune/tasks/main.yml new file mode 100644 index 00000000..71ed0d04 --- /dev/null +++ b/roles/kubernetes/kubeadm/prune/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: remove nodes from api server + run_once: true + delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}" + loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}" + command: "kubectl delete node {{ item }}" + +- name: prune network plugin + include_tasks: "net_{{ kubernetes_network_plugin }}.yml" diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml new file mode 100644 index 00000000..8a8c7752 --- /dev/null +++ b/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml @@ -0,0 +1,14 @@ +--- +- name: stop/disable systemd units for stale kubeguard peers + loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}" + systemd: + name: "kubeguard-peer-{{ item }}.service" + state: stopped + enabled: no + failed_when: false + +- name: remove systemd units for stale kubeguard peers + loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}" + file: + name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service" + state: absent diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_none.yml b/roles/kubernetes/kubeadm/prune/tasks/net_none.yml new file mode 100644 index 00000000..94832c38 --- /dev/null +++ b/roles/kubernetes/kubeadm/prune/tasks/net_none.yml @@ -0,0 +1,2 @@ +--- +## nothing to do here diff --git a/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml index 03b3f205..bcb48960 100644 --- a/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml @@ -1,13 +1,13 @@ --- - name: check if kubeguard interface service unit exists stat: - path: /etc/systemd/system/kubeguard-interfaces.service + path: /etc/systemd/system/kubeguard-interface.service register: 
kubeguard_interface_unit - name: bring down kubeguard interface when: kubeguard_interface_unit.stat.exists systemd: - name: kubeguard-interfaces.service + name: kubeguard-interface.service state: stopped - name: gather list of all kubeguard related service units @@ -15,7 +15,7 @@ path: /etc/systemd/system/ patterns: - "kubeguard-peer-*.service" - - kubeguard-interfaces.service + - kubeguard-interface.service register: kubeguard_units_installed - name: remove all kubeguard related files and directories diff --git a/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml b/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml deleted file mode 100644 index f15058d2..00000000 --- a/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: stop/disable systemd units for stale kubeguard peers - loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}" - systemd: - name: "kubeguard-peer-{{ item }}.service" - state: stopped - enabled: no - failed_when: false - -- name: remove systemd units for stale kubeguard peers - loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}" - file: - name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service" - state: absent -- cgit v1.2.3 From c2d634aaba07caa564056693bc5454f1582426ea Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 20 Jun 2020 17:23:44 +0200 Subject: kubeguard: add kube-router variant --- inventory/group_vars/k8s-chtest/main.yml | 3 + inventory/group_vars/kubernetes-cluster/vars.yml | 1 + .../kubeadm/base/tasks/net_kubeguard.yml | 13 +- .../kubeadm/master/tasks/net_kubeguard.yml | 14 +- .../net_kubeguard/kube-router.0.4.0.yml.j2 | 171 +++++++++++++++++++++ 5 files changed, 199 insertions(+), 3 deletions(-) create mode 100644 roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 (limited to 'roles/kubernetes') diff --git a/inventory/group_vars/k8s-chtest/main.yml b/inventory/group_vars/k8s-chtest/main.yml index 2e5d56d3..a7fe0120 100644 --- a/inventory/group_vars/k8s-chtest/main.yml +++ b/inventory/group_vars/k8s-chtest/main.yml @@ -5,6 +5,9 @@ docker_pkg_name: docker-ce kubernetes_version: 1.18.3 kubernetes_container_runtime: docker kubernetes_network_plugin: kubeguard +kubernetes_network_plugin_replaces_kube_proxy: true +kubernetes_network_plugin_variant: with-kube-router +kubernetes_network_plugin_version: 0.4.0 kubernetes: cluster_name: chtest diff --git a/inventory/group_vars/kubernetes-cluster/vars.yml b/inventory/group_vars/kubernetes-cluster/vars.yml index 2d7a696f..edec4e3e 100644 --- a/inventory/group_vars/kubernetes-cluster/vars.yml +++ b/inventory/group_vars/kubernetes-cluster/vars.yml @@ -1,4 +1,5 @@ --- kubernetes_network_plugin_replaces_kube_proxy: false +kubernetes_network_plugin_variant: default kubernetes_nodelocal_dnscache_ip: 169.254.20.10 diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml index 37b5030d..2d706a03 100644 --- a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml @@ -1,8 +1,10 @@ --- - name: make sure kubernetes_network_plugin_replaces_kube_proxy is not set + when: + - kubernetes_network_plugin_variant != 'with-kube-router' run_once: yes assert: - msg: "this network plugin can not replace kube-proxy please set kubernetes_network_plugin_replaces_kube_proxy to false." 
+ msg: "kubeguard variant '{{ kubernetes_network_plugin_variant }}' can not replace kube-proxy please set kubernetes_network_plugin_replaces_kube_proxy to false or configure a differnt kubernetes_network_plugin_variant." that: - not kubernetes_network_plugin_replaces_kube_proxy @@ -82,3 +84,12 @@ template: src: net_kubeguard/cni.json.j2 dest: /etc/cni/net.d/kubeguard.json + +- name: install packages needed for debugging kube-router + when: kubernetes_network_plugin_variant == 'with-kube-router' + apt: + name: + - iptables + - ipvsadm + - ipset + state: present diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml index 95fee7c8..f364fb5f 100644 --- a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml @@ -1,3 +1,13 @@ --- -### kubeguard needs to be deployed before the cluster has been initialized. -### there is nothing more todo here. +- name: install kube-router variant + when: "kubernetes_network_plugin_variant == 'with-kube-router'" + block: + - name: generate kubeguard (kube-router) configuration + template: + src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2" + dest: /etc/kubernetes/network-plugin.yml + + - name: install kubeguard (kube-router) on to the cluster + command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml + register: kubeguard_apply_result + changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 new file mode 100644 index 00000000..51bfdaae --- /dev/null +++ b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 @@ -0,0 +1,171 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --run-router=false + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + 
livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system -- cgit v1.2.3 From b77997ae59aedcf9afb292cf2eb7a49999a33e94 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 20 Jun 2020 19:37:18 +0200 Subject: kubernetes: add network-plugin kube-router --- .../kubeadm/base/tasks/net_kube-router.yml | 8 + .../kubeadm/master/tasks/net_kube-router.yml | 10 + .../templates/net_kube-router/config.0.4.0.yml.j2 | 237 +++++++++++++++++++++ .../kubeadm/prune/tasks/net_kube-router.yml | 2 + roles/kubernetes/kubeadm/reset/tasks/main.yml | 1 + 5 files changed, 258 insertions(+) create mode 100644 roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml create mode 100644 roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml create mode 100644 roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 create mode 100644 roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml (limited to 'roles/kubernetes') diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml new file mode 100644 index 00000000..246b20bc --- /dev/null +++ b/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml @@ -0,0 +1,8 @@ +--- +- name: install packages needed for debugging kube-router + apt: + name: + - iptables + - ipvsadm + - ipset + state: present diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml new file mode 100644 index 00000000..5368b6f5 --- /dev/null +++ b/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml @@ -0,0 +1,10 @@ +--- +- name: generate kube-router configuration + template: + src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2" + dest: /etc/kubernetes/network-plugin.yml + +- name: install kube-router on to the cluster + 
command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml + register: kube_router_apply_result + changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 new file mode 100644 index 00000000..b06687d5 --- /dev/null +++ b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 @@ -0,0 +1,237 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-cfg + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + cni-conf.json: | + { + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, + "hairpinMode": true, + "ipam":{ + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "snat":true, + "portMappings":true + } + } + ] + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --run-router=true + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + - --routes-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + initContainers: + - name: install-cni + image: busybox + imagePullPolicy: Always + command: + - /bin/sh + - -c + - set -e -x; + if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then + if [ -f /etc/cni/net.d/*.conf ]; then + rm -f /etc/cni/net.d/*.conf; + fi; + TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; + cp /etc/kube-router/cni-conf.json ${TMP}; + mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; + fi + volumeMounts: + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kube-router-cfg + mountPath: /etc/kube-router + hostNetwork: true + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kube-router-cfg + configMap: + name: kube-router-cfg + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml new file mode 100644 index 00000000..94832c38 --- /dev/null +++ b/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml @@ -0,0 +1,2 @@ +--- +## nothing to do here diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml index cf9c125d..8a21fbd5 100644 --- a/roles/kubernetes/kubeadm/reset/tasks/main.yml +++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml @@ -14,6 +14,7 @@ - /etc/kubernetes/network-plugin.yml - /etc/kubernetes/node-local-dns.yml - /etc/kubernetes/addons + - /etc/default/kubelet file: path: "{{ item }}" state: absent -- cgit v1.2.3
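For reference, a minimal sketch of the inventory settings that would select the new kube-router network plugin added in this series. The file path and concrete values below are hypothetical examples for illustration; only the variable names are taken from the roles and templates above.

# group_vars/<your-cluster>/main.yml -- hypothetical example, not part of the patch series
kubernetes_network_plugin: kube-router                # resolved by the include_tasks "net_{{ kubernetes_network_plugin }}.yml" pattern used in the kubeadm roles
kubernetes_network_plugin_version: 0.4.0              # selects templates/net_kube-router/config.0.4.0.yml.j2
kubernetes_network_plugin_replaces_kube_proxy: true   # rendered into --run-service-proxy=true in the kube-router DaemonSet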