-rw-r--r--  chaos-at-home/group_vars/k8s-chtest.yml | 10
-rw-r--r--  chaos-at-home/k8s-chtest.yml | 37
-rw-r--r--  common/kubernetes-cluster-cleanup.yml | 21
-rw-r--r--  common/kubernetes-cluster.yml | 4
-rw-r--r--  dan/k8s-emc.yml | 4
-rw-r--r--  inventory/group_vars/k8s-chtest/main.yml | 44
-rw-r--r--  inventory/group_vars/kubernetes-cluster/vars.yml | 3
-rw-r--r--  inventory/host_vars/ch-hroottest.yml | 3
-rw-r--r--  inventory/host_vars/ch-k8s-m0.yml | 47
-rw-r--r--  inventory/host_vars/ch-k8s-m1.yml | 47
-rw-r--r--  inventory/host_vars/ch-k8s-m2.yml | 44
-rw-r--r--  inventory/host_vars/ch-k8s-w0.yml | 44
-rw-r--r--  inventory/host_vars/ch-k8s-w1.yml | 44
-rw-r--r--  inventory/host_vars/sk-2019vm.yml | 4
-rw-r--r--  inventory/hosts.ini | 38
-rw-r--r--  roles/kubernetes/addons/metrics-server/tasks/main.yml | 9
-rw-r--r--  roles/kubernetes/base/tasks/main.yml | 4
-rw-r--r--  roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py (renamed from roles/kubernetes/net/kubeguard/node/filter_plugins/kubeguard.py) | 6
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/main.yml | 23
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml | 8
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml | 95
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/net_none.yml | 7
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 (renamed from roles/kubernetes/net/kubeguard/node/templates/k8s.json.j2) | 4
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 (renamed from roles/kubernetes/net/kubeguard/node/templates/ifupdown.sh.j2) | 6
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2 (renamed from roles/kubernetes/net/kubeguard/node/files/kubeguard-interfaces.service) | 0
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2 (renamed from roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2) | 12
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml | 10
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml | 13
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/net_none.yml | 2
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/primary-master.yml | 14
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml | 8
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 | 237
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 | 171
-rw-r--r--  roles/kubernetes/kubeadm/node/tasks/main.yml | 6
-rw-r--r--  roles/kubernetes/kubeadm/prune/tasks/main.yml | 9
-rw-r--r--  roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml | 2
-rw-r--r--  roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml | 14
-rw-r--r--  roles/kubernetes/kubeadm/prune/tasks/net_none.yml | 2
-rw-r--r--  roles/kubernetes/kubeadm/reset/handlers/main.yml (renamed from roles/kubernetes/net/kubeguard/node/handlers/main.yml) | 0
-rw-r--r--  roles/kubernetes/kubeadm/reset/tasks/main.yml | 9
-rw-r--r--  roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml (renamed from roles/kubernetes/net/kubeguard/reset/tasks/main.yml) | 10
-rw-r--r--  roles/kubernetes/net/kubeguard/node/tasks/main.yml | 107
-rw-r--r--  roles/wireguard/base/tasks/main.yml | 5
-rw-r--r--  spreadspace/k8s-lwl.yml | 4
44 files changed, 1017 insertions(+), 174 deletions(-)
diff --git a/chaos-at-home/group_vars/k8s-chtest.yml b/chaos-at-home/group_vars/k8s-chtest.yml
new file mode 100644
index 00000000..b824f9dd
--- /dev/null
+++ b/chaos-at-home/group_vars/k8s-chtest.yml
@@ -0,0 +1,10 @@
+$ANSIBLE_VAULT;1.2;AES256;chaos-at-home
+64343538336637373635323961366666663233376166326663316362346135353465363432616462
+6530623534623435366466656163343436333064316434650a333232643966653634663531396138
+66643633656133396139353565313834653165353331386637316664383237393237633232393337
+3363626365306538380a333361613761343263356639656632633030626265653730393232653165
+32303034393934303538386664616366613339316265653734656562303232396234623733316532
+32313837623163633663633635396664313732323939663633613238303436656534336432363433
+32623863373239326133303932336361366164383462633730653934333830346636616630356666
+37636638666332393639353738623135313331336166333435363063373733313437613264323138
+39373564363637323034373636323430323437623636623935396237323263383362
diff --git a/chaos-at-home/k8s-chtest.yml b/chaos-at-home/k8s-chtest.yml
new file mode 100644
index 00000000..e3daf681
--- /dev/null
+++ b/chaos-at-home/k8s-chtest.yml
@@ -0,0 +1,37 @@
+---
+- name: Basic Node Setup
+ hosts: k8s-chtest
+ roles:
+ - role: apt-repo/base
+ - role: core/base
+ - role: core/sshd
+ - role: core/zsh
+
+- import_playbook: ../common/kubernetes-cluster-layout.yml
+ vars:
+ kubernetes_cluster_layout:
+ nodes_group: k8s-chtest
+ masters:
+ - ch-k8s-m0
+ - ch-k8s-m1
+ - ch-k8s-m2
+ primary_master: ch-k8s-m0
+
+### hack hack hack...
+- name: cook kubernetes secrets
+ hosts: _kubernetes_nodes_
+ gather_facts: no
+ tasks:
+ - set_fact:
+ kubernetes_secrets_cooked: "{{ kubernetes_secrets }}"
+ - when: external_ip is defined
+ set_fact:
+ external_ip_cooked: "{{ external_ip }}"
+
+- import_playbook: ../common/kubernetes-cluster.yml
+- import_playbook: ../common/kubernetes-cluster-cleanup.yml
+
+- name: install addons
+ hosts: _kubernetes_primary_master_
+ roles:
+ - role: kubernetes/addons/metrics-server
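
Note on the "cook kubernetes secrets" hack above: the set_fact tasks evaluate the templated kubernetes_secrets and external_ip values once per host and store the results as plain facts; presumably this is what lets later plays read them from other hosts via hostvars without gathering facts there. A minimal illustration of where the cooked value ends up being consumed, taken from the kubeguard peer template further down in this diff:

    {% set wg_host = hostvars[peer].external_ip_cooked | default(hostvars[peer].ansible_default_ipv4.address) -%}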
diff --git a/common/kubernetes-cluster-cleanup.yml b/common/kubernetes-cluster-cleanup.yml
index 83d6945c..5647e3d6 100644
--- a/common/kubernetes-cluster-cleanup.yml
+++ b/common/kubernetes-cluster-cleanup.yml
@@ -13,24 +13,19 @@
add_host:
name: "{{ item }}"
inventory_dir: "{{ hostvars[item].inventory_dir }}"
- group: _kubernetes_nodes_remove_
+ group: _kubernetes_nodes_prune_
changed_when: False
- name: drain superflous nodes
- loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+ loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+- name: prune superflous nodes from cluster
+ hosts: _kubernetes_nodes_
+ roles:
+ - role: kubernetes/kubeadm/prune
-- name: remove nodes from api server
- hosts: _kubernetes_primary_master_
- tasks:
- - name: remove superflous nodes
- loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
- command: "kubectl delete node {{ item }}"
-
-- name: try to clean superflous nodes
- hosts: _kubernetes_nodes_remove_
+- name: wipe superflous nodes
+ hosts: _kubernetes_nodes_prune_
roles:
- role: kubernetes/kubeadm/reset
- - role: kubernetes/net/kubeguard/reset
- when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard'
diff --git a/common/kubernetes-cluster.yml b/common/kubernetes-cluster.yml
index 77fcc02a..fe26d90d 100644
--- a/common/kubernetes-cluster.yml
+++ b/common/kubernetes-cluster.yml
@@ -36,10 +36,6 @@
- name: kubernetes base installation
hosts: _kubernetes_nodes_
roles:
- - role: apt-repo/spreadspace
- when: kubernetes_network_plugin == 'kubeguard'
- - role: kubernetes/net/kubeguard/node
- when: kubernetes_network_plugin == 'kubeguard'
- role: kubernetes/base
- role: kubernetes/kubeadm/base
diff --git a/dan/k8s-emc.yml b/dan/k8s-emc.yml
index 7b4e9158..d81d8358 100644
--- a/dan/k8s-emc.yml
+++ b/dan/k8s-emc.yml
@@ -16,11 +16,13 @@
### hack hack hack...
- name: cook kubernetes secrets
- hosts: _kubernetes_masters_
+ hosts: _kubernetes_nodes_
gather_facts: no
tasks:
- set_fact:
kubernetes_secrets_cooked: "{{ kubernetes_secrets }}"
+ - when: external_ip is defined
+ set_fact:
external_ip_cooked: "{{ external_ip }}"
- import_playbook: ../common/kubernetes-cluster.yml
diff --git a/inventory/group_vars/k8s-chtest/main.yml b/inventory/group_vars/k8s-chtest/main.yml
new file mode 100644
index 00000000..a7fe0120
--- /dev/null
+++ b/inventory/group_vars/k8s-chtest/main.yml
@@ -0,0 +1,44 @@
+---
+docker_pkg_provider: docker-com
+docker_pkg_name: docker-ce
+
+kubernetes_version: 1.18.3
+kubernetes_container_runtime: docker
+kubernetes_network_plugin: kubeguard
+kubernetes_network_plugin_replaces_kube_proxy: true
+kubernetes_network_plugin_variant: with-kube-router
+kubernetes_network_plugin_version: 0.4.0
+
+kubernetes:
+ cluster_name: chtest
+
+ dedicated_master: True
+ api_extra_sans:
+ - 178.63.180.139
+ - 178.63.180.140
+
+ pod_ip_range: 172.18.0.0/16
+ pod_ip_range_size: 24
+ service_ip_range: 172.18.192.0/18
+
+
+kubernetes_secrets:
+ encryption_config_keys: "{{ vault_kubernetes_encryption_config_keys }}"
+
+
+kubeguard:
+ ## node_index must be in the range between 1 and 190 -> 189 hosts possible
+ ##
+ ## hardcoded hostnames are not nice but if we do this via host_vars
+ ## the info is spread over multiple files and this makes it more diffcult
+ ## to find mistakes, so it is nicer to keep it in one place...
+ node_index:
+ ch-k8s-w0: 1
+ ch-k8s-w1: 2
+ ch-k8s-m0: 100
+ ch-k8s-m1: 101
+ ch-k8s-m2: 102
+
+kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
+
+kubernetes_metrics_server_version: 0.3.6
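
Note on kubernetes_overlay_node_ip above: the filter chain carves a per-node /24 out of the pod range and takes its first address. A minimal sketch of how it resolves with the values from this file (illustrative debug task, not part of the repo):

    - name: show computed kubeguard overlay IP (illustrative only)
      debug:
        msg: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
      # ch-k8s-w0 (node_index 1):   172.18.0.0/16 | ipsubnet(24, 1)   -> 172.18.1.0/24   | ipaddr(1) -> 172.18.1.1/24   -> 172.18.1.1
      # ch-k8s-m0 (node_index 100): 172.18.0.0/16 | ipsubnet(24, 100) -> 172.18.100.0/24 | ipaddr(1) -> 172.18.100.1/24 -> 172.18.100.1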
diff --git a/inventory/group_vars/kubernetes-cluster/vars.yml b/inventory/group_vars/kubernetes-cluster/vars.yml
index c1149988..edec4e3e 100644
--- a/inventory/group_vars/kubernetes-cluster/vars.yml
+++ b/inventory/group_vars/kubernetes-cluster/vars.yml
@@ -1,2 +1,5 @@
---
+kubernetes_network_plugin_replaces_kube_proxy: false
+kubernetes_network_plugin_variant: default
+
kubernetes_nodelocal_dnscache_ip: 169.254.20.10
diff --git a/inventory/host_vars/ch-hroottest.yml b/inventory/host_vars/ch-hroottest.yml
index c56845fa..555791ca 100644
--- a/inventory/host_vars/ch-hroottest.yml
+++ b/inventory/host_vars/ch-hroottest.yml
@@ -44,6 +44,9 @@ vm_host:
offsets:
ch-hroottest-vm1: 100
ch-hroottest-obsd: 101
+ ch-k8s-m2: 200
+ ch-k8s-w0: 210
+ ch-k8s-w1: 211
nat: yes
zfs:
default:
diff --git a/inventory/host_vars/ch-k8s-m0.yml b/inventory/host_vars/ch-k8s-m0.yml
new file mode 100644
index 00000000..30239ab3
--- /dev/null
+++ b/inventory/host_vars/ch-k8s-m0.yml
@@ -0,0 +1,47 @@
+---
+_vm_host_: sk-2019vm
+
+install:
+ vm:
+ host: "{{ _vm_host_ }}"
+ mem: 4096
+ numcpu: 2
+ autostart: True
+ disks:
+ primary: /dev/sda
+ scsi:
+ sda:
+ type: zfs
+ name: root
+ size: 20g
+ properties:
+ refreservation: none
+ interfaces:
+ - bridge: br-public
+ name: primary0
+
+network:
+ nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}"
+ domain: "{{ host_domain }}"
+ systemd_link:
+ interfaces: "{{ install.interfaces }}"
+ primary:
+ interface: primary0
+ ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+ mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+ gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}"
+ overlay: "{{ (hostvars[_vm_host_].vm_host.network.bridges.public.overlay.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.overlay.offsets[inventory_hostname])).split('/')[0] }}"
+
+external_ip: "{{ network.primary.overlay }}"
+
+docker_lvm:
+ vg: "{{ host_name }}"
+ lv: docker
+ size: 7G
+ fs: ext4
+
+kubelet_lvm:
+ vg: "{{ host_name }}"
+ lv: kubelet
+ size: 5G
+ fs: ext4
diff --git a/inventory/host_vars/ch-k8s-m1.yml b/inventory/host_vars/ch-k8s-m1.yml
new file mode 100644
index 00000000..30239ab3
--- /dev/null
+++ b/inventory/host_vars/ch-k8s-m1.yml
@@ -0,0 +1,47 @@
+---
+_vm_host_: sk-2019vm
+
+install:
+ vm:
+ host: "{{ _vm_host_ }}"
+ mem: 4096
+ numcpu: 2
+ autostart: True
+ disks:
+ primary: /dev/sda
+ scsi:
+ sda:
+ type: zfs
+ name: root
+ size: 20g
+ properties:
+ refreservation: none
+ interfaces:
+ - bridge: br-public
+ name: primary0
+
+network:
+ nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}"
+ domain: "{{ host_domain }}"
+ systemd_link:
+ interfaces: "{{ install.interfaces }}"
+ primary:
+ interface: primary0
+ ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+ mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+ gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}"
+ overlay: "{{ (hostvars[_vm_host_].vm_host.network.bridges.public.overlay.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.overlay.offsets[inventory_hostname])).split('/')[0] }}"
+
+external_ip: "{{ network.primary.overlay }}"
+
+docker_lvm:
+ vg: "{{ host_name }}"
+ lv: docker
+ size: 7G
+ fs: ext4
+
+kubelet_lvm:
+ vg: "{{ host_name }}"
+ lv: kubelet
+ size: 5G
+ fs: ext4
diff --git a/inventory/host_vars/ch-k8s-m2.yml b/inventory/host_vars/ch-k8s-m2.yml
new file mode 100644
index 00000000..a41c97a8
--- /dev/null
+++ b/inventory/host_vars/ch-k8s-m2.yml
@@ -0,0 +1,44 @@
+---
+_vm_host_: ch-hroottest
+
+install:
+ vm:
+ host: "{{ _vm_host_ }}"
+ mem: 4096
+ numcpu: 2
+ autostart: True
+ disks:
+ primary: /dev/sda
+ scsi:
+ sda:
+ type: zfs
+ name: root
+ size: 20g
+ properties:
+ refreservation: none
+ interfaces:
+ - bridge: br-public
+ name: primary0
+
+network:
+ nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}"
+ domain: "{{ host_domain }}"
+ systemd_link:
+ interfaces: "{{ install.interfaces }}"
+ primary:
+ interface: primary0
+ ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+ mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+ gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}"
+
+docker_lvm:
+ vg: "{{ host_name }}"
+ lv: docker
+ size: 7G
+ fs: ext4
+
+kubelet_lvm:
+ vg: "{{ host_name }}"
+ lv: kubelet
+ size: 5G
+ fs: ext4
diff --git a/inventory/host_vars/ch-k8s-w0.yml b/inventory/host_vars/ch-k8s-w0.yml
new file mode 100644
index 00000000..5cf6d444
--- /dev/null
+++ b/inventory/host_vars/ch-k8s-w0.yml
@@ -0,0 +1,44 @@
+---
+_vm_host_: ch-hroottest
+
+install:
+ vm:
+ host: "{{ _vm_host_ }}"
+ mem: 4096
+ numcpu: 4
+ autostart: True
+ disks:
+ primary: /dev/sda
+ scsi:
+ sda:
+ type: zfs
+ name: root
+ size: 100g
+ properties:
+ refreservation: none
+ interfaces:
+ - bridge: br-public
+ name: primary0
+
+network:
+ nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}"
+ domain: "{{ host_domain }}"
+ systemd_link:
+ interfaces: "{{ install.interfaces }}"
+ primary:
+ interface: primary0
+ ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+ mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+ gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}"
+
+docker_lvm:
+ vg: "{{ host_name }}"
+ lv: docker
+ size: 15G
+ fs: ext4
+
+kubelet_lvm:
+ vg: "{{ host_name }}"
+ lv: kubelet
+ size: 15G
+ fs: ext4
diff --git a/inventory/host_vars/ch-k8s-w1.yml b/inventory/host_vars/ch-k8s-w1.yml
new file mode 100644
index 00000000..5cf6d444
--- /dev/null
+++ b/inventory/host_vars/ch-k8s-w1.yml
@@ -0,0 +1,44 @@
+---
+_vm_host_: ch-hroottest
+
+install:
+ vm:
+ host: "{{ _vm_host_ }}"
+ mem: 4096
+ numcpu: 4
+ autostart: True
+ disks:
+ primary: /dev/sda
+ scsi:
+ sda:
+ type: zfs
+ name: root
+ size: 100g
+ properties:
+ refreservation: none
+ interfaces:
+ - bridge: br-public
+ name: primary0
+
+network:
+ nameservers: "{{ hostvars[_vm_host_].vm_host.network.dns }}"
+ domain: "{{ host_domain }}"
+ systemd_link:
+ interfaces: "{{ install.interfaces }}"
+ primary:
+ interface: primary0
+ ip: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr(hostvars[_vm_host_].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+ mask: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+ gateway: "{{ hostvars[_vm_host_].vm_host.network.bridges.public.prefix | ipaddr('address') }}"
+
+docker_lvm:
+ vg: "{{ host_name }}"
+ lv: docker
+ size: 15G
+ fs: ext4
+
+kubelet_lvm:
+ vg: "{{ host_name }}"
+ lv: kubelet
+ size: 15G
+ fs: ext4
diff --git a/inventory/host_vars/sk-2019vm.yml b/inventory/host_vars/sk-2019vm.yml
index 503995ee..de162712 100644
--- a/inventory/host_vars/sk-2019vm.yml
+++ b/inventory/host_vars/sk-2019vm.yml
@@ -62,6 +62,8 @@ vm_host:
# emc-master: 137
lw-master: 137
ele-gwhetzner: 138
+ ch-k8s-m0: 139
+ ch-k8s-m1: 140
ch-mimas: 142
sk-testvm: 253
nat: yes
@@ -72,6 +74,8 @@ vm_host:
# emc-master: 1
lw-master: 1
ele-gwhetzner: 2
+ ch-k8s-m0: 3
+ ch-k8s-m1: 4
ch-mimas: 6
sk-testvm: 7
zfs:
diff --git a/inventory/hosts.ini b/inventory/hosts.ini
index ad231374..9add78d3 100644
--- a/inventory/hosts.ini
+++ b/inventory/hosts.ini
@@ -32,6 +32,7 @@ ch-hroottest-obsd host_name=hroot-test-obsd
mz-chaos-at-home
chaos-at-home-switches
chaos-at-home-ap
+chaos-at-home-k8s
[mz-chaos-at-home]
mz-router ansible_host=chmz-router
@@ -53,6 +54,13 @@ ch-ap1 host_name=ap1
ch-router
ch-pan
+[chaos-at-home-k8s]
+ch-k8s-m0 host_name=k8s-master0
+ch-k8s-m1 host_name=k8s-master1
+ch-k8s-m2 host_name=k8s-master2
+ch-k8s-w0 host_name=k8s-worker0
+ch-k8s-w1 host_name=k8s-worker1
+
[realraum:vars]
host_domain=realraum.at
@@ -261,6 +269,8 @@ sk-tomnext-nc
sk-tomnext-hp
ch-hroottest-vm1
ch-hroottest-obsd
+ch-k8s-m[0:2]
+ch-k8s-w[0:1]
[hroot]
sk-2019
@@ -313,9 +323,18 @@ ele-dolmetsch-raspi
+### Elevate Festival
+[elevate-festival:children]
+elevate
+k8s-emc
+
+
+
+## Kubernetes
[kubernetes-cluster:children]
k8s-emc
k8s-lwl
+k8s-chtest
[standalone-kubelet]
sk-cloudia
@@ -350,13 +369,6 @@ k8s-emc-distribution
k8s-emc-streamer
-### Elevate Festival
-[elevate-festival:children]
-elevate
-k8s-emc
-
-
-
### Kubernetes Cluster: lendwirbel-live
[k8s-lwl-encoder]
lw-dione
@@ -376,3 +388,15 @@ k8s-lwl-master
k8s-lwl-encoder
k8s-lwl-distribution
k8s-lwl-streamer
+
+
+### Kubernetes Cluster: ch-test
+[k8s-chtest-master]
+ch-k8s-m[0:2]
+
+[k8s-chtest-worker]
+ch-k8s-w[0:1]
+
+[k8s-chtest:children]
+k8s-chtest-master
+k8s-chtest-worker
diff --git a/roles/kubernetes/addons/metrics-server/tasks/main.yml b/roles/kubernetes/addons/metrics-server/tasks/main.yml
index e09106c1..fb725a87 100644
--- a/roles/kubernetes/addons/metrics-server/tasks/main.yml
+++ b/roles/kubernetes/addons/metrics-server/tasks/main.yml
@@ -1,10 +1,15 @@
---
+- name: create base directory for metrics-server addon
+ file:
+ path: /etc/kubernetes/addons/metrics-server
+ state: directory
+
- name: copy config for metrics-server
template:
src: "components.{{ kubernetes_metrics_server_version }}.yml.j2"
- dest: /etc/kubernetes/metrics-server.yml
+ dest: /etc/kubernetes/addons/metrics-server/config.yml
- name: install metrics-server onto the cluster
- command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/metrics-server.yml
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/addons/metrics-server/config.yml
register: kube_metrics_server_apply_result
changed_when: (kube_metrics_server_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 602266d5..da5f7408 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -46,12 +46,12 @@
{% endif %}
source <(crictl completion)
-- name: add dummy group with gid 998
+- name: add dummy group with gid 990
group:
name: app
gid: 990
-- name: add dummy user with uid 998
+- name: add dummy user with uid 990
user:
name: app
uid: 990
diff --git a/roles/kubernetes/net/kubeguard/node/filter_plugins/kubeguard.py b/roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py
index 199ff14b..2220e545 100644
--- a/roles/kubernetes/net/kubeguard/node/filter_plugins/kubeguard.py
+++ b/roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py
@@ -4,7 +4,7 @@ __metaclass__ = type
from ansible import errors
-def direct_net_zone(data, myname, peer):
+def kubeguard_direct_net_zone(data, myname, peer):
try:
zones = []
for zone in data:
@@ -19,14 +19,14 @@ def direct_net_zone(data, myname, peer):
return zones[0]
except Exception as e:
- raise errors.AnsibleFilterError("direct_net_zones(): %s" % str(e))
+ raise errors.AnsibleFilterError("kubeguard_direct_net_zones(): %s" % str(e))
class FilterModule(object):
''' Kubeguard Network Filters '''
filter_map = {
- 'direct_net_zone': direct_net_zone,
+ 'kubeguard_direct_net_zone': kubeguard_direct_net_zone,
}
def filters(self):
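
Note on the renamed filter: kubeguard_direct_net_zone() is used by the peer unit template below to pick a direct (non-wireguard) transfer network between two nodes, if one is configured. A minimal sketch of the expected kubeguard.direct_net_zones structure, with a hypothetical zone name, transfer network and interface names inferred from the template usage in this diff:

    kubeguard:
      direct_net_zones:
        zone0:                          # hypothetical zone name
          transfer_net: 192.0.2.0/24    # assumed example transfer network
          node_interface:               # nodes in this zone and their direct interfaces (assumed names)
            ch-k8s-w0: eth1
            ch-k8s-w1: eth1

    # template usage (see peer.service.j2 below):
    # {% set direct_zone = kubeguard.direct_net_zones | default({}) | kubeguard_direct_net_zone(inventory_hostname, peer) -%}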
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 2d2bd324..7d882f31 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -3,7 +3,7 @@
apt:
name:
- haproxy
- - hatop
+ - haproxyctl
- "kubeadm={{ kubernetes_version }}-00"
- "kubectl={{ kubernetes_version }}-00"
state: present
@@ -48,16 +48,13 @@
state: "{% if haproxy_config is changed %}restarted{% else %}started{% endif %}"
enabled: yes
-- name: add hatop config for shells
- loop:
- - zsh
- - bash
- blockinfile:
- path: "/root/.{{ item }}rc"
- create: yes
- marker: "### {mark} ANSIBLE MANAGED BLOCK for hatop ###"
- content: |
- alias hatop="hatop -s /var/run/haproxy/admin.sock"
+## loading the modules temporarly because kubeadm will complain if they are not there
+# but i don't think it is necessary to make this persistent, also ignoring changes here
+- name: load module br_netfilter to satisfy kubeadm init/join
+ modprobe:
+ name: br_netfilter
+ state: present
+ changed_when: false
-# - name: prepare network plugin
-# include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
+- name: prepare network plugin
+ include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml
new file mode 100644
index 00000000..246b20bc
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml
@@ -0,0 +1,8 @@
+---
+- name: install packages needed for debugging kube-router
+ apt:
+ name:
+ - iptables
+ - ipvsadm
+ - ipset
+ state: present
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..2d706a03
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
@@ -0,0 +1,95 @@
+---
+- name: make sure kubernetes_network_plugin_replaces_kube_proxy is not set
+ when:
+ - kubernetes_network_plugin_variant != 'with-kube-router'
+ run_once: yes
+ assert:
+ msg: "kubeguard variant '{{ kubernetes_network_plugin_variant }}' can not replace kube-proxy please set kubernetes_network_plugin_replaces_kube_proxy to false or configure a differnt kubernetes_network_plugin_variant."
+ that:
+ - not kubernetes_network_plugin_replaces_kube_proxy
+
+
+- name: install wireguard
+ import_role:
+ name: wireguard/base
+
+- name: create network config directory
+ file:
+ name: /var/lib/kubeguard/
+ state: directory
+
+- name: install ifupdown script
+ template:
+ src: net_kubeguard/ifupdown.sh.j2
+ dest: /var/lib/kubeguard/ifupdown.sh
+ mode: 0755
+ # TODO: notify reload... this is unfortunately already to late because
+ # it must probably be brought down by the old version of the script
+
+- name: generate wireguard private key
+ shell: "umask 077; wg genkey > /var/lib/kubeguard/kubeguard-wg0.privatekey"
+ args:
+ creates: /var/lib/kubeguard/kubeguard-wg0.privatekey
+
+- name: fetch wireguard public key
+ shell: "wg pubkey < /var/lib/kubeguard/kubeguard-wg0.privatekey"
+ register: kubeguard_wireguard_pubkey
+ changed_when: false
+ check_mode: no
+
+- name: install systemd service unit for network interface
+ template:
+ src: net_kubeguard/interface.service.j2
+ dest: /etc/systemd/system/kubeguard-interface.service
+ # TODO: notify: reload???
+
+- name: make sure kubeguard interface service is started and enabled
+ systemd:
+ daemon_reload: yes
+ name: kubeguard-interface.service
+ state: started
+ enabled: yes
+
+- name: install systemd units for every kubeguard peer
+ loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
+ loop_control:
+ loop_var: peer
+ template:
+ src: net_kubeguard/peer.service.j2
+ dest: "/etc/systemd/system/kubeguard-peer-{{ peer }}.service"
+ # TODO: notify restart for peers that change...
+
+- name: make sure kubeguard peer services are started and enabled
+ loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
+ systemd:
+ daemon_reload: yes
+ name: "kubeguard-peer-{{ item }}.service"
+ state: started
+ enabled: yes
+
+- name: enable IPv4 forwarding
+ sysctl:
+ name: net.ipv4.ip_forward
+ value: '1'
+ sysctl_set: yes
+ state: present
+ reload: yes
+
+- name: create cni config directory
+ file:
+ name: /etc/cni/net.d
+ state: directory
+
+- name: install cni config
+ template:
+ src: net_kubeguard/cni.json.j2
+ dest: /etc/cni/net.d/kubeguard.json
+
+- name: install packages needed for debugging kube-router
+ when: kubernetes_network_plugin_variant == 'with-kube-router'
+ apt:
+ name:
+ - iptables
+ - ipvsadm
+ - ipset
+ state: present
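
Note: after these tasks have run, every node should have a kubeguard-wg0 wireguard interface plus one kubeguard-peer-<node>.service unit per other cluster node. A minimal ad-hoc check, assuming the runtime group set up by the cluster playbooks (illustrative only, not part of the role):

    - name: verify kubeguard wireguard mesh (illustrative only)
      hosts: _kubernetes_nodes_
      tasks:
        - name: list configured peers and latest handshakes
          command: wg show kubeguard-wg0
          changed_when: false
        - name: check that the interface unit is active
          command: systemctl is-active kubeguard-interface.service
          changed_when: false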
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_none.yml b/roles/kubernetes/kubeadm/base/tasks/net_none.yml
new file mode 100644
index 00000000..0924c458
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/tasks/net_none.yml
@@ -0,0 +1,7 @@
+---
+- name: make sure kubernetes_network_plugin_replaces_kube_proxy is not set
+ run_once: yes
+ assert:
+ msg: "this network plugin can not replace kube-proxy please set kubernetes_network_plugin_replaces_kube_proxy to false."
+ that:
+ - not kubernetes_network_plugin_replaces_kube_proxy
diff --git a/roles/kubernetes/net/kubeguard/node/templates/k8s.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2
index 65b1357a..eb9e3d61 100644
--- a/roles/kubernetes/net/kubeguard/node/templates/k8s.json.j2
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2
@@ -1,8 +1,8 @@
{
"cniVersion": "0.3.1",
- "name": "k8s",
+ "name": "kubeguard",
"type": "bridge",
- "bridge": "kube-br0",
+ "bridge": "kubeguard-br0",
"isDefaultGateway": true,
"hairpinMode": true,
"ipam": {
diff --git a/roles/kubernetes/net/kubeguard/node/templates/ifupdown.sh.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2
index 98b38cf4..f940d413 100644
--- a/roles/kubernetes/net/kubeguard/node/templates/ifupdown.sh.j2
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2
@@ -9,12 +9,12 @@ INET_IF="{{ ansible_default_ipv4.interface }}"
POD_NET_CIDR="{{ kubernetes.pod_ip_range }}"
{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%}
-BR_IF="kube-br0"
+BR_IF="kubeguard-br0"
BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}"
BR_IP_CIDR="{{ br_net | ipaddr(1) }}"
BR_NET_CIDR="{{ br_net }}"
-TUN_IF="kube-wg0"
+TUN_IF="kubeguard-wg0"
TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}"
@@ -30,7 +30,7 @@ case "$1" in
# bring up wireguard tunnel to other nodes
ip link add dev "$TUN_IF" type wireguard
ip addr add dev "$TUN_IF" "$TUN_IP_CIDR"
- wg set "$TUN_IF" listen-port {{ kubeguard_wireguard_port }} private-key "$CONF_D/$TUN_IF.privatekey"
+ wg set "$TUN_IF" listen-port {{ kubeguard_wireguard_port | default(51820) }} private-key "$CONF_D/$TUN_IF.privatekey"
ip link set up dev "$TUN_IF"
# make pods and service IPs reachable
diff --git a/roles/kubernetes/net/kubeguard/node/files/kubeguard-interfaces.service b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2
index 35fc8f90..35fc8f90 100644
--- a/roles/kubernetes/net/kubeguard/node/files/kubeguard-interfaces.service
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2
diff --git a/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2
index 72b39c3f..c9d96a5a 100644
--- a/roles/kubernetes/net/kubeguard/node/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2
@@ -1,12 +1,12 @@
[Unit]
Description=Kubernetes Network Peer {{ peer }}
After=network.target
-Requires=kubeguard-interfaces.service
-After=kubeguard-interfaces.service
+Requires=kubeguard-interface.service
+After=kubeguard-interface.service
{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%}
{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
-{% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
+{% set direct_zone = kubeguard.direct_net_zones | default({}) | kubeguard_direct_net_zone(inventory_hostname, peer) -%}
{% if direct_zone %}
{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
{% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
@@ -15,7 +15,7 @@ After=kubeguard-interfaces.service
{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%}
{% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
{% set wg_host = hostvars[peer].external_ip_cooked | default(hostvars[peer].ansible_default_ipv4.address) -%}
-{% set wg_port = hostvars[peer].kubeguard_wireguard_port -%}
+{% set wg_port = hostvars[peer].kubeguard_wireguard_port | default(51820) -%}
{% set wg_allowedips = (tun_ip | ipaddr('address')) + "/32," + pod_net_peer %}
{% endif %}
[Service]
@@ -28,8 +28,8 @@ ExecStop=/sbin/ip route del {{ pod_net_peer }}
ExecStop=/sbin/ip link set down dev {{ direct_interface }}
ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }}
{% else %}
-ExecStart=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10
-ExecStop=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} remove
+ExecStart=/usr/bin/wg set kubeguard-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10
+ExecStop=/usr/bin/wg set kubeguard-wg0 peer {{ wg_pubkey }} remove
{% endif %}
RemainAfterExit=yes
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml
new file mode 100644
index 00000000..5368b6f5
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml
@@ -0,0 +1,10 @@
+---
+- name: generate kube-router configuration
+ template:
+ src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2"
+ dest: /etc/kubernetes/network-plugin.yml
+
+- name: install kube-router on to the cluster
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
+ register: kube_router_apply_result
+ changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..f364fb5f
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
@@ -0,0 +1,13 @@
+---
+- name: install kube-router variant
+ when: "kubernetes_network_plugin_variant == 'with-kube-router'"
+ block:
+ - name: generate kubeguard (kube-router) configuration
+ template:
+ src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2"
+ dest: /etc/kubernetes/network-plugin.yml
+
+ - name: install kubeguard (kube-router) on to the cluster
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
+ register: kubeguard_apply_result
+ changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_none.yml b/roles/kubernetes/kubeadm/master/tasks/net_none.yml
new file mode 100644
index 00000000..bf1a16d5
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/net_none.yml
@@ -0,0 +1,2 @@
+---
+## this "plugin" is for testing purposes only
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index f24e9ac1..432f7479 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -27,8 +27,8 @@
- name: initialize kubernetes master and store log
block:
- name: initialize kubernetes master
- command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
- # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+ command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+ # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
args:
creates: /etc/kubernetes/pki/ca.crt
register: kubeadm_init
@@ -40,6 +40,12 @@
content: "{{ kubeadm_init.stdout }}\n"
dest: /etc/kubernetes/kubeadm-init.log
+ - name: dump error output of kubeadm init to log file
+ when: kubeadm_init.changed and kubeadm_init.stderr
+ copy:
+ content: "{{ kubeadm_init.stderr }}\n"
+ dest: /etc/kubernetes/kubeadm-init.errors
+
- name: create bootstrap token for existing cluster
command: kubeadm token create --ttl 42m
check_mode: no
@@ -119,5 +125,5 @@
## Network Plugin
-# - name: install network plugin
-# include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
+- name: install network plugin
+ include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index 31fb31d6..610a8d3f 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -28,7 +28,7 @@
- name: join kubernetes secondary master node and store log
block:
- name: join kubernetes secondary master node
- throttle: 1 ## TODO test this!
+ throttle: 1
command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
args:
creates: /etc/kubernetes/kubelet.conf
@@ -42,6 +42,12 @@
content: "{{ kubeadm_join.stdout }}\n"
dest: /etc/kubernetes/kubeadm-join.log
+ - name: dump error output of kubeadm join to log file
+ when: kubeadm_join.changed and kubeadm_join.stderr
+ copy:
+ content: "{{ kubeadm_join.stderr }}\n"
+ dest: /etc/kubernetes/kubeadm-join.errors
+
# TODO: acutally check if node has registered
- name: give the new master(s) a moment to register
when: kubeadm_join is changed
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
new file mode 100644
index 00000000..b06687d5
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
@@ -0,0 +1,237 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ cni-conf.json: |
+ {
+ "cniVersion":"0.3.0",
+ "name":"mynet",
+ "plugins":[
+ {
+ "name":"kubernetes",
+ "type":"bridge",
+ "bridge":"kube-bridge",
+ "isDefaultGateway":true,
+ "hairpinMode": true,
+ "ipam":{
+ "type":"host-local"
+ }
+ },
+ {
+ "type":"portmap",
+ "capabilities":{
+ "snat":true,
+ "portMappings":true
+ }
+ }
+ ]
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --run-router=true
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ - --routes-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: KUBE_ROUTER_CNI_CONF_FILE
+ value: /etc/cni/net.d/10-kuberouter.conflist
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ initContainers:
+ - name: install-cni
+ image: busybox
+ imagePullPolicy: Always
+ command:
+ - /bin/sh
+ - -c
+ - set -e -x;
+ if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
+ if [ -f /etc/cni/net.d/*.conf ]; then
+ rm -f /etc/cni/net.d/*.conf;
+ fi;
+ TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
+ cp /etc/kube-router/cni-conf.json ${TMP};
+ mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
+ fi
+ volumeMounts:
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kube-router-cfg
+ mountPath: /etc/kube-router
+ hostNetwork: true
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+ - effect: NoSchedule
+ key: node.kubernetes.io/not-ready
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: cni-conf-dir
+ hostPath:
+ path: /etc/cni/net.d
+ - name: kube-router-cfg
+ configMap:
+ name: kube-router-cfg
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: kube-router
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
new file mode 100644
index 00000000..51bfdaae
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
@@ -0,0 +1,171 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --run-router=false
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ hostNetwork: true
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+ - effect: NoSchedule
+ key: node.kubernetes.io/not-ready
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: kube-router
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index 655b1b18..6b3d18ae 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -14,3 +14,9 @@
copy: # noqa 503
content: "{{ kubeadm_join.stdout }}\n"
dest: /etc/kubernetes/kubeadm-join.log
+
+ - name: dump error output of kubeadm join to log file
+ when: kubeadm_join.changed and kubeadm_join.stderr
+ copy:
+ content: "{{ kubeadm_join.stderr }}\n"
+ dest: /etc/kubernetes/kubeadm-join.errors
diff --git a/roles/kubernetes/kubeadm/prune/tasks/main.yml b/roles/kubernetes/kubeadm/prune/tasks/main.yml
new file mode 100644
index 00000000..71ed0d04
--- /dev/null
+++ b/roles/kubernetes/kubeadm/prune/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: remove nodes from api server
+ run_once: true
+ delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+ loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
+ command: "kubectl delete node {{ item }}"
+
+- name: prune network plugin
+ include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml
new file mode 100644
index 00000000..94832c38
--- /dev/null
+++ b/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml
@@ -0,0 +1,2 @@
+---
+## nothing to do here
diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..8a8c7752
--- /dev/null
+++ b/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml
@@ -0,0 +1,14 @@
+---
+- name: stop/disable systemd units for stale kubeguard peers
+ loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
+ systemd:
+ name: "kubeguard-peer-{{ item }}.service"
+ state: stopped
+ enabled: no
+ failed_when: false
+
+- name: remove systemd units for stale kubeguard peers
+ loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
+ file:
+ name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service"
+ state: absent
diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_none.yml b/roles/kubernetes/kubeadm/prune/tasks/net_none.yml
new file mode 100644
index 00000000..94832c38
--- /dev/null
+++ b/roles/kubernetes/kubeadm/prune/tasks/net_none.yml
@@ -0,0 +1,2 @@
+---
+## nothing to do here
diff --git a/roles/kubernetes/net/kubeguard/node/handlers/main.yml b/roles/kubernetes/kubeadm/reset/handlers/main.yml
index bb7fde2b..bb7fde2b 100644
--- a/roles/kubernetes/net/kubeguard/node/handlers/main.yml
+++ b/roles/kubernetes/kubeadm/reset/handlers/main.yml
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index c35e2bfc..8a21fbd5 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -6,9 +6,15 @@
loop:
- /etc/kubernetes/kubeadm.config
- /etc/kubernetes/kubeadm-init.log
+ - /etc/kubernetes/kubeadm-init.errors
- /etc/kubernetes/kubeadm-join.log
+ - /etc/kubernetes/kubeadm-join.errors
- /etc/kubernetes/pki
- /etc/kubernetes/encryption
+ - /etc/kubernetes/network-plugin.yml
+ - /etc/kubernetes/node-local-dns.yml
+ - /etc/kubernetes/addons
+ - /etc/default/kubelet
file:
path: "{{ item }}"
state: absent
@@ -25,3 +31,6 @@
file:
path: "{{ item.path }}"
state: absent
+
+- name: extra-cleanup for kubeguard network plugin
+ import_tasks: net_kubeguard.yml
diff --git a/roles/kubernetes/net/kubeguard/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml
index d24f9eff..bcb48960 100644
--- a/roles/kubernetes/net/kubeguard/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml
@@ -1,21 +1,21 @@
---
- name: check if kubeguard interface service unit exists
stat:
- path: /etc/systemd/system/kubeguard-interfaces.service
+ path: /etc/systemd/system/kubeguard-interface.service
register: kubeguard_interface_unit
- name: bring down kubeguard interface
+ when: kubeguard_interface_unit.stat.exists
systemd:
- name: kubeguard-interfaces.service
+ name: kubeguard-interface.service
state: stopped
- when: kubeguard_interface_unit.stat.exists
- name: gather list of all kubeguard related service units
find:
path: /etc/systemd/system/
patterns:
- - "kubeguard-peer-*.service"
- - kubeguard-interfaces.service
+ - "kubeguard-peer-*.service"
+ - kubeguard-interface.service
register: kubeguard_units_installed
- name: remove all kubeguard related files and directories
diff --git a/roles/kubernetes/net/kubeguard/node/tasks/main.yml b/roles/kubernetes/net/kubeguard/node/tasks/main.yml
deleted file mode 100644
index 0658b42c..00000000
--- a/roles/kubernetes/net/kubeguard/node/tasks/main.yml
+++ /dev/null
@@ -1,107 +0,0 @@
----
-- name: install wireguard
- import_role:
- name: wireguard/base
-
-- name: create network config directory
- file:
- name: /var/lib/kubeguard/
- state: directory
-
-- name: configure wireguard port
- set_fact:
- kubeguard_wireguard_port: "{{ kubernetes.wireguard_port | default(51820) }}"
-
-- name: install ifupdown script
- template:
- src: ifupdown.sh.j2
- dest: /var/lib/kubeguard/ifupdown.sh
- mode: 0755
- # TODO: notify reload... this is unfortunately already to late because
- # it must probably be brought down by the old version of the script
-
-- name: generate wireguard private key
- shell: "umask 077; wg genkey > /var/lib/kubeguard/kube-wg0.privatekey"
- args:
- creates: /var/lib/kubeguard/kube-wg0.privatekey
-
-- name: fetch wireguard public key
- shell: "wg pubkey < /var/lib/kubeguard/kube-wg0.privatekey"
- register: kubeguard_wireguard_pubkey
- changed_when: false
- check_mode: no
-
-- name: install systemd service unit for network interfaces
- copy:
- src: kubeguard-interfaces.service
- dest: /etc/systemd/system/kubeguard-interfaces.service
- # TODO: notify: reload???
-
-- name: make sure kubeguard interfaces service is started and enabled
- systemd:
- daemon_reload: yes
- name: kubeguard-interfaces.service
- state: started
- enabled: yes
-
-- name: get list of currently installed kubeguard peers
- find:
- path: /etc/systemd/system/
- pattern: "kubeguard-peer-*.service"
- register: kubeguard_peers_installed
-
-- name: compute list of peers to be added
- set_fact:
- kubeguard_peers_to_add: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
-
-- name: compute list of peers to be removed
- set_fact:
- kubeguard_peers_to_remove: "{{ kubeguard_peers_installed.files | map(attribute='path') | map('replace', '/etc/systemd/system/kubeguard-peer-', '') | map('replace', '.service', '') | difference(kubeguard_peers_to_add) }}"
-
-- name: stop/disable systemd units for stale kubeguard peers
- loop: "{{ kubeguard_peers_to_remove }}"
- systemd:
- name: "kubeguard-peer-{{ item }}.service"
- state: stopped
- enabled: no
-
-- name: remove systemd units for stale kubeguard peers
- loop: "{{ kubeguard_peers_to_remove }}"
- file:
- name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service"
- state: absent
-
-- name: install systemd units for every kubeguard peer
- loop: "{{ kubeguard_peers_to_add }}"
- loop_control:
- loop_var: peer
- template:
- src: kubeguard-peer.service.j2
- dest: "/etc/systemd/system/kubeguard-peer-{{ peer }}.service"
- # TODO: notify restart for peers that change...
-
-- name: make sure kubeguard peer services are started and enabled
- loop: "{{ kubeguard_peers_to_add }}"
- systemd:
- daemon_reload: yes
- name: "kubeguard-peer-{{ item }}.service"
- state: started
- enabled: yes
-
-- name: enable IPv4 forwarding
- sysctl:
- name: net.ipv4.ip_forward
- value: '1'
- sysctl_set: yes
- state: present
- reload: yes
-
-- name: create cni config directory
- file:
- name: /etc/cni/net.d
- state: directory
-
-- name: install cni config
- template:
- src: k8s.json.j2
- dest: /etc/cni/net.d/k8s.json
diff --git a/roles/wireguard/base/tasks/main.yml b/roles/wireguard/base/tasks/main.yml
index 6b7cea23..4d60150d 100644
--- a/roles/wireguard/base/tasks/main.yml
+++ b/roles/wireguard/base/tasks/main.yml
@@ -1,4 +1,9 @@
---
+- name: enable spreadspace repo
+ when: (ansible_distribution == 'Debian' and (ansible_distribution_major_version | int) < 11) or (ansible_distribution == 'Ubuntu' and (ansible_distribution_major_version | int) < 20)
+ import_role:
+ name: apt-repo/spreadspace
+
- name: install dkms
import_role:
name: prepare-dkms
diff --git a/spreadspace/k8s-lwl.yml b/spreadspace/k8s-lwl.yml
index 17d21601..902d833d 100644
--- a/spreadspace/k8s-lwl.yml
+++ b/spreadspace/k8s-lwl.yml
@@ -16,11 +16,13 @@
### hack hack hack...
- name: cook kubernetes secrets
- hosts: _kubernetes_masters_
+ hosts: _kubernetes_nodes_
gather_facts: no
tasks:
- set_fact:
kubernetes_secrets_cooked: "{{ kubernetes_secrets }}"
+ - when: external_ip is defined
+ set_fact:
external_ip_cooked: "{{ external_ip }}"
- import_playbook: ../common/kubernetes-cluster.yml