-rw-r--r--  common/kubernetes-cleanup.yml | 37
-rw-r--r--  common/kubernetes-cluster-layout.yml | 46
-rw-r--r--  common/kubernetes.yml | 116
-rw-r--r--  dan/ele-thetys.yml | 3
-rw-r--r--  inventory/group_vars/k8s-test-2019vm/main.yml | 31
-rw-r--r--  inventory/group_vars/k8s-test-atlas/main.yml | 36
-rw-r--r--  inventory/group_vars/k8s-test/main.yml | 49
-rw-r--r--  inventory/host_vars/ch-atlas.yml | 3
-rw-r--r--  inventory/host_vars/ele-thetys.yml | 11
-rw-r--r--  inventory/host_vars/sk-2019vm.yml | 4
-rw-r--r--  inventory/hosts.ini | 18
-rw-r--r--  roles/kubernetes/base/tasks/cri_containerd.yml | 4
-rw-r--r--  roles/kubernetes/base/tasks/cri_docker.yml | 8
-rw-r--r--  roles/kubernetes/base/tasks/main.yml | 7
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/main.yml | 34
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 | 36
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/main.yml | 108
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/primary-master.yml | 109
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml | 48
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/encryption-config.j2 | 13
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 | 34
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 | 45
-rw-r--r--  roles/kubernetes/kubeadm/node/tasks/main.yml | 30
-rw-r--r--  roles/kubernetes/kubeadm/reset/tasks/main.yml | 10
-rw-r--r--  roles/kubernetes/net/kubeguard/defaults/main.yml | 2
-rw-r--r--  roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service | 2
-rw-r--r--  roles/kubernetes/net/kubeguard/meta/main.yml | 4
-rw-r--r--  roles/kubernetes/net/kubeguard/tasks/add.yml | 8
-rw-r--r--  roles/kubernetes/net/kubeguard/tasks/main.yml | 9
-rw-r--r--  roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2 | 4
-rw-r--r--  roles/kubernetes/net/kubeguard/templates/k8s.json.j2 | 2
-rw-r--r--  roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 | 15
-rw-r--r--  roles/kubernetes/standalone/templates/kubelet.service.override.j2 | 1
-rw-r--r--  spreadspace/group_vars/k8s-test.yml | 10
-rw-r--r--  spreadspace/k8s-test.yml | 17
-rw-r--r--  spreadspace/s2-k8s-test0.yml | 7
-rw-r--r--  spreadspace/s2-k8s-test1.yml | 7
-rw-r--r--  spreadspace/s2-k8s-test2.yml | 7
-rw-r--r--  spreadspace/s2-k8s-test3.yml | 7
-rw-r--r--  spreadspace/s2-k8s-test4.yml | 7
40 files changed, 712 insertions, 237 deletions
diff --git a/common/kubernetes-cleanup.yml b/common/kubernetes-cleanup.yml
new file mode 100644
index 00000000..be55d11e
--- /dev/null
+++ b/common/kubernetes-cleanup.yml
@@ -0,0 +1,37 @@
+---
+- name: check for nodes to be removed
+ hosts: _kubernetes_primary_master_
+ tasks:
+ - name: fetch list of current nodes
+ command: kubectl get nodes -o name
+ changed_when: False
+ check_mode: no
+ register: kubectl_node_list
+
+ - name: generate list of nodes to be removed
+ loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(groups['_kubernetes_nodes_']) }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[item].inventory_dir }}"
+ group: _kubernetes_nodes_remove_
+ changed_when: False
+
+ - name: drain superfluous nodes
+ loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+ command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+
+- name: try to clean superfluous nodes
+ hosts: _kubernetes_nodes_remove_
+ roles:
+ - role: kubernetes/kubeadm/reset
+ - role: kubernetes/net/kubeguard
+ when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard'
+ vars:
+ kubeguard_action: remove
+
+- name: remove node from api server
+ hosts: _kubernetes_primary_master_
+ tasks:
+ - name: remove superfluous nodes
+ loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+ command: "kubectl delete node {{ item }}"
diff --git a/common/kubernetes-cluster-layout.yml b/common/kubernetes-cluster-layout.yml
new file mode 100644
index 00000000..64856fc5
--- /dev/null
+++ b/common/kubernetes-cluster-layout.yml
@@ -0,0 +1,46 @@
+---
+- name: create host groups for kubernetes cluster
+ hosts: "{{ kubernetes_cluster_layout.nodes_group }}"
+ connection: local
+ gather_facts: no
+ run_once: yes
+ tasks:
+ - name: sanity check - fail if masters are not included in nodes
+ assert:
+ msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (master and non-master)"
+ that: kubernetes_cluster_layout.masters | difference(ansible_play_hosts_all) | length == 0
+
+ - name: sanity check - fail if primary master is not in masters
+ assert:
+ msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
+ that: kubernetes_cluster_layout.primary_master is undefined or kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters
+
+ - name: sanity check - fail on multiple masters if no primary master is configured
+ assert:
+ msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
+ that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
+
+ - name: create group for all kubernetes nodes
+ loop: "{{ ansible_play_hosts_all }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[item].inventory_dir }}"
+ group: _kubernetes_nodes_
+ changed_when: False
+
+ - name: create group for kubernetes master nodes
+ loop: "{{ kubernetes_cluster_layout.masters }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[item].inventory_dir }}"
+ group: _kubernetes_masters_
+ changed_when: False
+
+ - name: create group for kubernetes primary master
+ vars:
+ item: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[item].inventory_dir }}"
+ group: _kubernetes_primary_master_
+ changed_when: False
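For illustration only (the hostnames and group name below are made up, not part of this commit): a minimal sketch of how the layout playbook above could be imported for a three-master cluster. With more than one entry in masters, primary_master has to be set or the sanity checks above fail.

- import_playbook: common/kubernetes-cluster-layout.yml
  vars:
    kubernetes_cluster_layout:
      nodes_group: k8s-example        # must contain *all* nodes, masters included
      masters:
        - example-master0
        - example-master1
        - example-master2
      primary_master: example-master0  # required as soon as more than one master is listed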
diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index 311f3ebd..d5b58767 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -4,46 +4,52 @@
gather_facts: no
run_once: yes
tasks:
- - name: check if master group contains only one node
- fail:
- msg: "There must be exactly one master node defined"
- failed_when: (groups['_kubernetes_masters_'] | length) != 1
-
- - name: setup variables
- set_fact:
- kubernetes_nodes: "{{ groups['_kubernetes_nodes_'] }}"
- kubernetes_master: "{{ groups['_kubernetes_masters_'] | first }}"
-
- - name: check whether every node has a net_index assigned
- fail:
- msg: "There are nodes without an assigned net-index: {{ kubernetes_nodes | difference(kubernetes.net_index.keys()) | join(', ') }}"
- failed_when: kubernetes_nodes | difference(kubernetes.net_index.keys()) | length > 0
-
- - name: check whether net indizes are unique
- fail:
- msg: "There are duplicate entries in the net_index table, every net-index is only allowed once"
- failed_when: (kubernetes.net_index.keys() | length) != (kubernetes.net_index.values() | unique | length)
-
- - name: check whether net indizes are all > 0
- fail:
- msg: "At least one net-index is < 1 (indizes start at 1)"
- failed_when: (kubernetes.net_index.values() | min) < 1
-
- - name: disable bridge and iptables in docker daemon config
- set_fact:
- docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+ - name: sanity checks for kubeguard
+ when: kubernetes_network_plugin == 'kubeguard'
+ block:
+ - name: check whether every node has a node_index assigned
+ assert:
+ msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
+ that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0
+
+ - name: check whether node indices are unique
+ assert:
+ msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
+ that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)
+
+ - name: check whether node indices are all > 0
+ assert:
+ msg: "At least one node_index is < 1 (indizes start at 1)"
+ that: (kubeguard.node_index.values() | min) > 0
+
+ - name: check whether kubernetes_overlay_node_ip is configured
+ assert:
+ msg: "For kubeguard to work you need to configure kubernetes_overlay_node_ip"
+ that: kubernetes_overlay_node_ip is defined
+
+ - name: make sure the kubernetes_cri_socket variable is configured correctly
+ when: kubernetes_container_runtime == 'containerd'
+ assert:
+ msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
+ that:
+ - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"
########
-- name: install kubernetes and overlay network
+- name: kubernetes base installation
hosts: _kubernetes_nodes_
roles:
- - role: docker
- role: kubernetes/net/kubeguard
+ when: kubernetes_network_plugin == 'kubeguard'
- role: kubernetes/base
- role: kubernetes/kubeadm/base
-- name: configure kubernetes master
- hosts: _kubernetes_masters_
+- name: configure kubernetes primary master
+ hosts: _kubernetes_primary_master_
+ roles:
+ - role: kubernetes/kubeadm/master
+
+- name: configure kubernetes secondary masters
+ hosts: _kubernetes_masters_:!_kubernetes_primary_master_
roles:
- role: kubernetes/kubeadm/master
@@ -52,50 +58,4 @@
roles:
- role: kubernetes/kubeadm/node
-########
-- name: check for nodes to be removed
- hosts: _kubernetes_masters_
- tasks:
- - name: fetch list of current nodes
- command: kubectl get nodes -o name
- changed_when: False
- check_mode: no
- register: kubectl_node_list
-
- - name: generate list of nodes to be removed
- loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
- add_host:
- name: "{{ item }}"
- inventory_dir: "{{ inventory_dir }}"
- group: _kubernetes_nodes_remove_
- changed_when: False
-
- - name: drain superflous nodes
- loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
- command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
-
-- name: try to clean superflous nodes
- hosts: _kubernetes_nodes_remove_
- roles:
- - role: kubernetes/kubeadm/reset
- - role: kubernetes/net/kubeguard
- vars:
- kubeguard_remove_node: yes
-
-- name: remove node from api server
- hosts: _kubernetes_masters_
- tasks:
- - name: remove superflous nodes
- loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
- command: "kubectl delete node {{ item }}"
-
- - name: wait a litte before removing bootstrap-token so new nodes have time to generate certificates for themselves
- when: kube_bootstrap_token != ""
- pause:
- seconds: 42
-
- - name: remove bootstrap-token
- when: kube_bootstrap_token != ""
- command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
-
### TODO: add node labels (i.e. for ingress daemonset)
diff --git a/dan/ele-thetys.yml b/dan/ele-thetys.yml
index eca748e9..4024989d 100644
--- a/dan/ele-thetys.yml
+++ b/dan/ele-thetys.yml
@@ -8,4 +8,5 @@
- role: admin-user
- role: blackmagic-desktopvideo
- role: apt-repo/spreadspace
- - role: docker
+ - role: kubernetes/base
+ - role: kubernetes/standalone
diff --git a/inventory/group_vars/k8s-test-2019vm/main.yml b/inventory/group_vars/k8s-test-2019vm/main.yml
new file mode 100644
index 00000000..4c08a1bb
--- /dev/null
+++ b/inventory/group_vars/k8s-test-2019vm/main.yml
@@ -0,0 +1,31 @@
+---
+vm_host: sk-2019vm
+
+install:
+ host: "{{ vm_host }}"
+ mem: 1024
+ numcpu: 2
+ disks:
+ primary: /dev/sda
+ scsi:
+ sda:
+ type: zfs
+ pool: storage
+ name: "{{ inventory_hostname }}"
+ size: 10g
+ interfaces:
+ - bridge: br-public
+ name: primary0
+ autostart: False
+
+network:
+ nameservers: "{{ hostvars[vm_host].vm_host.network.dns }}"
+ domain: "{{ host_domain }}"
+ systemd_link:
+ interfaces: "{{ install.interfaces }}"
+ primary:
+ interface: primary0
+ ip: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr(hostvars[vm_host].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+ mask: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+ gateway: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr('address') }}"
+# overlay: "{{ (hostvars[vm_host].vm_host.network.bridges.public.overlay.prefix | ipaddr(hostvars[vm_host].vm_host.network.bridges.public.overlay.offsets[inventory_hostname])).split('/')[0] }}"
diff --git a/inventory/group_vars/k8s-test-atlas/main.yml b/inventory/group_vars/k8s-test-atlas/main.yml
new file mode 100644
index 00000000..9838513d
--- /dev/null
+++ b/inventory/group_vars/k8s-test-atlas/main.yml
@@ -0,0 +1,36 @@
+---
+apt_repo_provider: ffgraz
+
+vm_host: ch-atlas
+
+install:
+ host: "{{ vm_host }}"
+ mem: 1024
+ numcpu: 2
+ disks:
+ primary: /dev/sda
+ scsi:
+ sda:
+ type: lvm
+ vg: "{{ hostvars[vm_host].host_name }}"
+ lv: "{{ inventory_hostname }}"
+ size: 10g
+ interfaces:
+ - bridge: br-public
+ name: primary0
+ mac: "{{ '52:54:00' | random_mac(seed=inventory_hostname + '-primary0') }}"
+ - bridge: br-k8stest
+ name: direct0
+ mac: "{{ '52:54:00' | random_mac(seed=inventory_hostname + '-direct0') }}"
+ autostart: True
+
+network:
+ nameservers: "{{ hostvars[vm_host].vm_host.network.dns }}"
+ domain: "{{ host_domain }}"
+ systemd_link:
+ interfaces: "{{ install.interfaces }}"
+ primary:
+ interface: primary0
+ ip: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr(hostvars[vm_host].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+ mask: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+ gateway: "{{ hostvars[vm_host].vm_host.network.bridges.public.gateway }}"
diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index 7e5cbe2e..60d381ec 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -1,2 +1,49 @@
---
-zsh_banner: chaos-at-home
+containerd_lvm:
+ vg: "{{ host_name }}"
+ lv: containerd
+ size: 4G
+ fs: ext4
+
+kubernetes_version: 1.17.1
+kubernetes_container_runtime: containerd
+kubernetes_network_plugin: kubeguard
+kubernetes_cri_socket: "unix:///run/containerd/containerd.sock"
+
+kubernetes:
+ cluster_name: k8s-test
+
+ dedicated_master: False
+ api_extra_sans:
+ - 89.106.215.23
+ - k8s-test.spreadspace.org
+
+ pod_ip_range: 172.18.0.0/16
+ pod_ip_range_size: 24
+ service_ip_range: 172.18.192.0/18
+
+# kubernetes_secrets:
+# encryption_config_keys: "{{ vault_kubernetes_encryption_config_keys }}"
+
+
+kubeguard:
+ ## node_index must be in the range between 1 and 190 -> 189 hosts possible
+ ##
+ ## hardcoded hostnames are not nice but if we do this via host_vars
+ ## the info is spread over multiple files and this makes it more difficult
+ ## to find mistakes, so it is nicer to keep it in one place...
+ node_index:
+ s2-k8s-test0: 1
+ s2-k8s-test1: 2
+ s2-k8s-test2: 3
+ s2-k8s-test3: 4
+ s2-k8s-test4: 5
+
+ direct_net_zones:
+ atlas:
+ transfer_net: 172.18.191.0/24
+ node_interface:
+ s2-k8s-test0: direct0
+ s2-k8s-test1: direct0
+
+kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
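A worked example of the expression above, using values from this file: s2-k8s-test2 has node_index 3, so it is assigned the fourth /24 of the pod range and its bridge address becomes the kubelet node IP.

# hypothetical evaluation for inventory_hostname == 's2-k8s-test2' (node_index 3)
#   '172.18.0.0/16' | ipsubnet(24, 3)   ->  172.18.3.0/24   (per-node pod subnet)
#   ...             | ipaddr(1)         ->  172.18.3.1/24   (bridge address)
#   ...             | ipaddr('address') ->  172.18.3.1
kubernetes_overlay_node_ip: 172.18.3.1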
diff --git a/inventory/host_vars/ch-atlas.yml b/inventory/host_vars/ch-atlas.yml
index e4acf4da..e9771732 100644
--- a/inventory/host_vars/ch-atlas.yml
+++ b/inventory/host_vars/ch-atlas.yml
@@ -15,4 +15,7 @@ vm_host:
offsets:
ch-keyserver: 3
ch-testvm: 4
+ s2-k8s-test0: 7
+ s2-k8s-test1: 8
r3-vex2: 11
+ k8stest: {}
diff --git a/inventory/host_vars/ele-thetys.yml b/inventory/host_vars/ele-thetys.yml
index 5740b206..51dcf1a0 100644
--- a/inventory/host_vars/ele-thetys.yml
+++ b/inventory/host_vars/ele-thetys.yml
@@ -28,3 +28,14 @@ docker_lvm:
lv: docker
size: 10G
fs: ext4
+
+kubelet_lvm:
+ vg: "{{ host_name }}"
+ lv: kubelet
+ size: 5G
+ fs: ext4
+
+kubernetes_version: 1.17.2
+kubernetes_container_runtime: docker
+kubernetes_standalone_max_pods: 42
+kubernetes_standalone_cni_variant: with-portmap
diff --git a/inventory/host_vars/sk-2019vm.yml b/inventory/host_vars/sk-2019vm.yml
index b2061380..37f9c97d 100644
--- a/inventory/host_vars/sk-2019vm.yml
+++ b/inventory/host_vars/sk-2019vm.yml
@@ -21,6 +21,9 @@ vm_host:
public:
prefix: 192.168.250.254/24
offsets:
+ s2-k8s-test2: 1
+ s2-k8s-test3: 2
+ s2-k8s-test4: 3
sk-torrent: 136
ch-mimas: 143
sk-testvm: 253
@@ -29,6 +32,7 @@ vm_host:
prefix: 178.63.180.136/29
offsets:
sk-torrent: 0
+ s2-k8s-test4: 3
ch-mimas: 6
sk-testvm: 7
diff --git a/inventory/hosts.ini b/inventory/hosts.ini
index f6b39010..88a2d2b1 100644
--- a/inventory/hosts.ini
+++ b/inventory/hosts.ini
@@ -92,6 +92,23 @@ s2-thetys host_name=thetys
s2-dione host_name=dione
s2-helene host_name=helene
+[spreadspace:children]
+k8s-test
+
+
+[k8s-test-atlas]
+s2-k8s-test0 host_name=k8s-test0
+s2-k8s-test1 host_name=k8s-test1
+
+[k8s-test-2019vm]
+s2-k8s-test2 host_name=k8s-test2
+s2-k8s-test3 host_name=k8s-test3
+s2-k8s-test4 host_name=k8s-test4
+
+[k8s-test:children]
+k8s-test-atlas
+k8s-test-2019vm
+
[emc:vars]
host_domain=elev8.at
@@ -215,6 +232,7 @@ r3-cccamp19-av
sk-testvm
sk-torrent
ch-mimas
+s2-k8s-test[0:4]
[hroot]
diff --git a/roles/kubernetes/base/tasks/cri_containerd.yml b/roles/kubernetes/base/tasks/cri_containerd.yml
new file mode 100644
index 00000000..aa34e6fe
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_containerd.yml
@@ -0,0 +1,4 @@
+---
+- name: install containerd
+ include_role:
+ name: containerd
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
new file mode 100644
index 00000000..67196f51
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -0,0 +1,8 @@
+---
+- name: disable bridge and iptables in docker daemon config
+ set_fact:
+ docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+
+- name: install docker
+ include_role:
+ name: docker
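The set_fact above merges two keys into whatever docker_daemon_config already held; assuming it was empty before, the fact handed on to the docker role would be the sketch below (how the role consumes it is outside this diff):

# sketch of the resulting fact, presumably rendered by the docker role into the daemon configuration
docker_daemon_config:
  bridge: "none"
  iptables: false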
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 9c91e347..f1802b0c 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -1,4 +1,7 @@
---
+- name: install container runtime
+ include_tasks: "cri_{{ kubernetes_container_runtime }}.yml"
+
- name: prepare /var/lib/kubelet as LVM
when: kubelet_lvm is defined
import_tasks: lvm.yml
@@ -66,11 +69,11 @@
- name: add dummy group with gid 998
group:
name: app
- gid: 998
+ gid: 990
- name: add dummy user with uid 998
user:
name: app
- uid: 998
+ uid: 990
group: app
password: "!"
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 2d9b9eed..8e913560 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -2,6 +2,8 @@
- name: install kubeadm and kubectl
apt:
name:
+ - haproxy
+ - hatop
- "kubeadm{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}"
- "kubectl{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}"
state: present
@@ -16,19 +18,43 @@
selection: hold
- name: set kubelet node-ip
+ when: kubernetes_overlay_node_ip is defined
lineinfile:
name: "/etc/default/kubelet"
regexp: '^KUBELET_EXTRA_ARGS='
- line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) | ipaddr(1) | ipaddr("address") }}'
+ line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_overlay_node_ip }}'
+ create: yes
-- name: add kubectl/kubeadm completion for shells
+- name: add kubeadm completion for shells
loop:
- zsh
- bash
blockinfile:
path: "/root/.{{ item }}rc"
create: yes
- marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###"
+ marker: "### {mark} ANSIBLE MANAGED BLOCK for kubeadm ###"
content: |
- source <(kubectl completion {{ item }})
source <(kubeadm completion {{ item }})
+
+- name: configure haproxy
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+ register: haproxy_config
+
+- name: (re)start haproxy
+ systemd:
+ name: haproxy
+ state: "{% if haproxy_config is changed %}restarted{% else %}started{% endif %}"
+ enabled: yes
+
+- name: add hatop config for shells
+ loop:
+ - zsh
+ - bash
+ blockinfile:
+ path: "/root/.{{ item }}rc"
+ create: yes
+ marker: "### {mark} ANSIBLE MANAGED BLOCK for hatop ###"
+ content: |
+ alias hatop="hatop -s /var/run/haproxy/admin.sock"
diff --git a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
new file mode 100644
index 00000000..3de6ac00
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
@@ -0,0 +1,36 @@
+global
+ log /dev/log local0
+ log /dev/log local1 notice
+ chroot /var/lib/haproxy
+ stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
+ stats timeout 30s
+ user haproxy
+ group haproxy
+ daemon
+
+frontend kube_api
+{% if '_kubernetes_masters_' in group_names %}
+ bind *:6443
+{% else %}
+ bind 127.0.0.1:6443
+{% endif %}
+ mode tcp
+ timeout client 3h
+ default_backend kube_api
+
+backend kube_api
+ mode tcp
+{% if '_kubernetes_masters_' in group_names %}
+ balance first
+{% else %}
+ balance roundrobin
+{% endif %}
+ option log-health-checks
+ option httpchk GET /healthz
+ http-check expect string ok
+ default-server inter 5s fall 3 rise 2
+ timeout connect 5s
+ timeout server 3h
+{% for master in groups['_kubernetes_masters_'] %}
+ server {{ hostvars[master].inventory_hostname }} {{ hostvars[master].kubernetes_overlay_node_ip | default(hostvars[master].ansible_default_ipv4.address) }}:6442 {% if master == inventory_hostname %}id 1{% endif %} check check-ssl verify none
+{% endfor %}
diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml
index 7cc6fe94..9af041b2 100644
--- a/roles/kubernetes/kubeadm/master/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/main.yml
@@ -1,67 +1,25 @@
---
-- name: check if kubeconfig admin.conf already exists
- stat:
- path: /etc/kubernetes/admin.conf
- register: kubeconfig_admin_stats
+# - name: create directory for encryption config
+# file:
+# name: /etc/kubernetes/encryption
+# state: directory
+# mode: 0700
-### cluster not yet initialized
+# - name: install encryption config
+# template:
+# src: encryption-config.j2
+# dest: /etc/kubernetes/encryption/config
+# mode: 0600
-- name: create new cluster
- when: kubeconfig_admin_stats.stat.exists == False
- block:
- - name: generate bootstrap token for new cluster
- command: kubeadm token generate
- changed_when: False
- check_mode: no
- register: kubeadm_token_generate
+- name: install primary master
+ include_tasks: primary-master.yml
+ when: "'_kubernetes_primary_master_' in group_names"
- - name: create kubernetes config directory
- file:
- path: /etc/kubernetes
- state: directory
+- name: install secondary masters
+ include_tasks: secondary-masters.yml
+ when: "'_kubernetes_primary_master_' not in group_names"
- ## TODO test whether the generated cluster configs really works - since it has never been used...
- - name: install cluster config for kubeadm
- template:
- src: kubeadm-cluster.config.j2
- dest: /etc/kubernetes/kubeadm-cluster.config
-
- - name: set up kubernetes master
- command: "kubeadm init --config '/etc/kubernetes/kubeadm-cluster.config' --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
- args:
- creates: /etc/kubernetes/pki/ca.crt
- register: kubeadm_init
-
- - name: dump output of kubeadm init to log file
- when: kubeadm_init.changed
- copy:
- content: "{{ kubeadm_init.stdout }}\n"
- dest: /etc/kubernetes/kubeadm-init.log
-
-### cluster is already initialized
-
-- name: prepare cluster for new nodes
- when: kubeconfig_admin_stats.stat.exists == True
- block:
-
- - name: fetch list of current nodes
- command: kubectl get nodes -o name
- changed_when: False
- check_mode: no
- register: kubectl_node_list
-
- - name: save list of current nodes
- set_fact:
- kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
-
- - name: create bootstrap token for existing cluster
- when: kubernetes_nodes | difference(kubernetes_current_nodes) | length > 0
- command: kubeadm token create --ttl 42m
- check_mode: no
- register: kubeadm_token_create
-
-##
- name: check if master is tainted (1/2)
command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
@@ -74,28 +32,13 @@
kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
- name: remove taint from master node
- when: "kubernetes.dedicated_master == False and 'node-role.kubernetes.io/master' in kube_node_taints"
- command: kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
+ when: not kubernetes.dedicated_master and 'node-role.kubernetes.io/master' in kube_node_taints
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
- name: add taint for master node
- when: "kubernetes.dedicated_master == True and 'node-role.kubernetes.io/master' not in kube_node_taints"
- command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ ansible_nodename }} node-role.kubernetes.io/master='':NoSchedule"
-
-- name: install openssl
- apt:
- name: openssl
- state: present
+ when: kubernetes.dedicated_master and 'node-role.kubernetes.io/master' not in kube_node_taints
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
-- name: get ca certificate digest
- shell: "openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'"
- check_mode: no
- register: kube_ca_openssl
- changed_when: False
-
-- name: set variables needed by kubernetes/nodes to join the cluster
- set_fact:
- kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
- kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
- name: prepare kubectl (1/2)
file:
@@ -107,3 +50,14 @@
dest: /root/.kube/config
src: /etc/kubernetes/admin.conf
state: link
+
+- name: add kubectl completion config for shells
+ with_items:
+ - zsh
+ - bash
+ blockinfile:
+ path: "/root/.{{ item }}rc"
+ create: yes
+ marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###"
+ content: |
+ source <(kubectl completion {{ item }})
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
new file mode 100644
index 00000000..115c8616
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -0,0 +1,109 @@
+---
+- name: check if kubeconfig kubelet.conf already exists
+ stat:
+ path: /etc/kubernetes/kubelet.conf
+ register: kubeconfig_kubelet_stats
+
+- name: generate kubeadm.config
+ template:
+ src: kubeadm.config.j2
+ dest: /etc/kubernetes/kubeadm.config
+ register: kubeadm_config
+
+### cluster not yet initialized
+
+- name: create new cluster
+ when: not kubeconfig_kubelet_stats.stat.exists
+ block:
+
+ #### kubeadm wants token to come from --config if --config is used
+ #### I think this is stupid -> TODO: send bug report
+ # - name: generate bootstrap token for new cluster
+ # command: kubeadm token generate
+ # changed_when: False
+ # check_mode: no
+ # register: kubeadm_token_generate
+
+ - name: initialize kubernetes master and store log
+ block:
+ - name: initialize kubernetes master
+ command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+ # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+ args:
+ creates: /etc/kubernetes/pki/ca.crt
+ register: kubeadm_init
+
+ always:
+ - name: dump output of kubeadm init to log file
+ when: kubeadm_init.changed
+ copy:
+ content: "{{ kubeadm_init.stdout }}\n"
+ dest: /etc/kubernetes/kubeadm-init.log
+
+ - name: create bootstrap token for existing cluster
+ command: kubeadm token create --ttl 42m
+ check_mode: no
+ register: kubeadm_token_generate
+
+
+### cluster is already initialized but config has changed
+
+- name: upgrade cluster config
+ when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed
+ block:
+
+ - name: fail for cluster upgrades
+ fail:
+ msg: "upgrading cluster config is currently not supported!"
+
+
+### cluster is already initialized
+
+- name: prepare cluster for new nodes
+ when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed
+ block:
+
+ - name: fetch list of current nodes
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+ changed_when: False
+ check_mode: no
+ register: kubectl_node_list
+
+ - name: save list of current nodes
+ set_fact:
+ kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+ - name: create bootstrap token for existing cluster
+ when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
+ command: kubeadm token create --ttl 42m
+ check_mode: no
+ register: kubeadm_token_create
+
+
+## calculate certificate digest
+
+- name: install openssl
+ apt:
+ name: openssl
+ state: present
+
+- name: get ca certificate digest
+ shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'"
+ args:
+ executable: /bin/bash
+ check_mode: no
+ register: kube_ca_openssl
+ changed_when: False
+
+- name: set variables needed by kubernetes/nodes to join the cluster
+ set_fact:
+ kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
+ kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ loop: "{{ groups['_kubernetes_nodes_'] }}"
+
+## Network Plugin
+
+# - name: install network plugin
+# include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
new file mode 100644
index 00000000..c00c3203
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -0,0 +1,48 @@
+---
+- name: fetch secrets needed for secondary master
+ run_once: true
+ delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+ block:
+
+ - name: fetch list of current nodes
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+ changed_when: False
+ check_mode: no
+ register: kubectl_node_list
+
+ - name: save list of current nodes
+ set_fact:
+ kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+ - name: upload certs
+ when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0"
+ command: kubeadm init phase upload-certs --upload-certs
+ check_mode: no
+ register: kubeadm_upload_certs
+
+
+- name: extracting encryption key for certs
+ set_fact:
+ kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
+
+- name: join kubernetes secondary master node and store log
+ block:
+ - name: join kubernetes secondary master node
+ command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+ args:
+ creates: /etc/kubernetes/kubelet.conf
+ register: kubeadm_join
+
+ always:
+ - name: dump output of kubeadm join to log file
+ when: kubeadm_join is changed
+ # This is not a handler by design to make sure this action runs at this point of the play.
+ copy: # noqa 503
+ content: "{{ kubeadm_join.stdout }}\n"
+ dest: /etc/kubernetes/kubeadm-join.log
+
+ # TODO: actually check if node has registered
+- name: give the new master(s) a moment to register
+ when: kubeadm_join is changed
+ pause: # noqa 503
+ seconds: 5
diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
new file mode 100644
index 00000000..345c9bf9
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
@@ -0,0 +1,13 @@
+kind: EncryptionConfiguration
+apiVersion: apiserver.config.k8s.io/v1
+resources:
+ - resources:
+ - secrets
+ providers:
+ - secretbox:
+ keys:
+{% for key in kubernetes_secrets.encryption_config_keys %}
+ - name: key{{ loop.index }}
+ secret: {{ key }}
+{% endfor %}
+ - identity: {}
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2
deleted file mode 100644
index 07c4dddd..00000000
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #}
-apiVersion: kubeadm.k8s.io/v1beta1
-kind: ClusterConfiguration
-kubernetesVersion: v{{ kubernetes.version }}
-clusterName: {{ kubernetes.cluster_name }}
-certificatesDir: /etc/kubernetes/pki
-{% if kubernetes.api_advertise_ip %}
-controlPlaneEndpoint: "{{ kubernetes.api_advertise_ip }}:6443"
-{% endif %}
-imageRepository: k8s.gcr.io
-networking:
- dnsDomain: cluster.local
- podSubnet: {{ kubernetes.pod_ip_range }}
- serviceSubnet: {{ kubernetes.service_ip_range }}
-etcd:
- local:
- dataDir: /var/lib/etcd
-apiServer:
-{% if kubernetes.api_extra_sans | length > 0 %}
- certSANs:
-{% for san in kubernetes.api_extra_sans %}
- - {{ san }}
-{% endfor %}
-{% endif %}
- extraArgs:
-{% if kubernetes.api_advertise_ip %}
- advertise-address: {{ kubernetes.api_advertise_ip }}
-{% endif %}
- authorization-mode: Node,RBAC
- timeoutForControlPlane: 4m0s
-controllerManager: {}
-scheduler: {}
-dns:
- type: CoreDNS
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
new file mode 100644
index 00000000..f48a34f3
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -0,0 +1,45 @@
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
+{# #}
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+{# TODO: this is ugly but we want to create our own token so we can #}
+{# better control its lifetime #}
+bootstrapTokens:
+- ttl: "1s"
+localAPIEndpoint:
+ bindPort: 6442
+{% if kubernetes_overlay_node_ip is defined %}
+ advertiseAddress: {{ kubernetes_overlay_node_ip }}
+{% endif %}
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+kubernetesVersion: {{ kubernetes_version }}
+clusterName: {{ kubernetes.cluster_name }}
+imageRepository: k8s.gcr.io
+controlPlaneEndpoint: 127.0.0.1:6443
+networking:
+ dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }}
+ podSubnet: {{ kubernetes.pod_ip_range }}
+ serviceSubnet: {{ kubernetes.service_ip_range }}
+apiServer:
+ # extraArgs:
+ # encryption-provider-config: /etc/kubernetes/encryption/config
+ # extraVolumes:
+ # - name: encryption-config
+ # hostPath: /etc/kubernetes/encryption
+ # mountPath: /etc/kubernetes/encryption
+ # readOnly: true
+ # pathType: Directory
+{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %}
+ certSANs: []
+{% else %}
+ certSANs:
+ {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }}
+{% endif %}
+controllerManager:
+ extraArgs:
+ node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}"
+scheduler: {}
+dns:
+ type: CoreDNS
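A rough, illustrative sketch of what this template could render to on s2-k8s-test0 with the k8s-test group vars from above (the advertise address is the kubeguard overlay IP for node_index 1; this is not output taken from a real run):

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
bootstrapTokens:
- ttl: "1s"
localAPIEndpoint:
  bindPort: 6442
  advertiseAddress: 172.18.1.1
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: 1.17.1
clusterName: k8s-test
imageRepository: k8s.gcr.io
controlPlaneEndpoint: 127.0.0.1:6443
networking:
  dnsDomain: cluster.local
  podSubnet: 172.18.0.0/16
  serviceSubnet: 172.18.192.0/18
apiServer:
  certSANs:
  - 89.106.215.23
  - k8s-test.spreadspace.org
controllerManager:
  extraArgs:
    node-cidr-mask-size: "24"
scheduler: {}
dns:
  type: CoreDNS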
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index 9f0057f9..1d5178ea 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -1,18 +1,16 @@
---
-- name: get master vars
- set_fact:
- kube_bootstrap_token: "{{ hostvars[kubernetes_master].kube_bootstrap_token }}"
- kube_bootstrap_ca_cert_hash: "{{ hostvars[kubernetes_master].kube_bootstrap_ca_cert_hash }}"
- kube_master_addr: "{{ kubernetes.api_advertise_ip | default(hostvars[kubernetes_master].ansible_default_ipv4.address) }}"
+- name: join kubernetes node and store log
+ block:
+ - name: join kubernetes node
+ command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+ args:
+ creates: /etc/kubernetes/kubelet.conf
+ register: kubeadm_join
-- name: join kubernetes node
- command: "kubeadm join --token {{ kube_bootstrap_token }} {{ kube_master_addr }}:6443 --discovery-token-ca-cert-hash {{ kube_bootstrap_ca_cert_hash }}"
- args:
- creates: /etc/kubernetes/kubelet.conf
- register: kubeadm_join
-
-- name: dump output of kubeadm join to log file
- when: kubeadm_join.changed
- copy:
- content: "{{ kubeadm_join.stdout }}\n"
- dest: /etc/kubernetes/kubeadm-join.log
+ always:
+ - name: dump output of kubeadm join to log file
+ when: kubeadm_join is changed
+ # This is not a handler by design to make sure this action runs at this point of the play.
+ copy: # noqa 503
+ content: "{{ kubeadm_join.stdout }}\n"
+ dest: /etc/kubernetes/kubeadm-join.log
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index a6d64c7d..f0e88e53 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -1,3 +1,13 @@
---
- name: clean up settings and files created by kubeadm
command: kubeadm reset -f
+
+- name: clean up extra configs and logs
+ loop:
+ - /etc/kubernetes/kubeadm.config
+ - /etc/kubernetes/kubeadm-init.log
+ - /etc/kubernetes/kubeadm-join.log
+ - /etc/kubernetes/pki
+ file:
+ path: "{{ item }}"
+ state: absent
diff --git a/roles/kubernetes/net/kubeguard/defaults/main.yml b/roles/kubernetes/net/kubeguard/defaults/main.yml
new file mode 100644
index 00000000..acabaa25
--- /dev/null
+++ b/roles/kubernetes/net/kubeguard/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+kubeguard_action: add
diff --git a/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service b/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
index f45df88a..35fc8f90 100644
--- a/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
+++ b/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
@@ -1,5 +1,5 @@
[Unit]
-Description=Kubernetes Network Interfaces
+Description=Kubeguard Network Setup
After=network.target
[Service]
diff --git a/roles/kubernetes/net/kubeguard/meta/main.yml b/roles/kubernetes/net/kubeguard/meta/main.yml
deleted file mode 100644
index 39c7d694..00000000
--- a/roles/kubernetes/net/kubeguard/meta/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-dependencies:
-- role: wireguard/base
- when: kubeguard_remove_node is not defined
diff --git a/roles/kubernetes/net/kubeguard/tasks/add.yml b/roles/kubernetes/net/kubeguard/tasks/add.yml
index b604302b..0658b42c 100644
--- a/roles/kubernetes/net/kubeguard/tasks/add.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/add.yml
@@ -1,4 +1,8 @@
---
+- name: install wireguard
+ import_role:
+ name: wireguard/base
+
- name: create network config directory
file:
name: /var/lib/kubeguard/
@@ -48,7 +52,7 @@
- name: compute list of peers to be added
set_fact:
- kubeguard_peers_to_add: "{{ kubernetes_nodes | difference(inventory_hostname) }}"
+ kubeguard_peers_to_add: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
- name: compute list of peers to be removed
set_fact:
@@ -87,7 +91,7 @@
- name: enable IPv4 forwarding
sysctl:
name: net.ipv4.ip_forward
- value: 1
+ value: '1'
sysctl_set: yes
state: present
reload: yes
diff --git a/roles/kubernetes/net/kubeguard/tasks/main.yml b/roles/kubernetes/net/kubeguard/tasks/main.yml
index 0e87af11..10b0d547 100644
--- a/roles/kubernetes/net/kubeguard/tasks/main.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/main.yml
@@ -1,8 +1,3 @@
---
-- name: add node to overlay network
- include_tasks: add.yml
- when: kubeguard_remove_node is not defined
-
-- name: remove node from overlay network
- include_tasks: remove.yml
- when: kubeguard_remove_node is defined
+- name: add/remove nodes to overlay network
+ include_tasks: "{{ kubeguard_action }}.yml"
diff --git a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2 b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
index 87849ee9..98b38cf4 100644
--- a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
+++ b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
@@ -8,14 +8,14 @@ INET_IF="{{ ansible_default_ipv4.interface }}"
POD_NET_CIDR="{{ kubernetes.pod_ip_range }}"
-{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) -%}
+{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%}
BR_IF="kube-br0"
BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}"
BR_IP_CIDR="{{ br_net | ipaddr(1) }}"
BR_NET_CIDR="{{ br_net }}"
TUN_IF="kube-wg0"
-TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[inventory_hostname]) }}"
+TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}"
case "$1" in
diff --git a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2 b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
index f457ed1c..65b1357a 100644
--- a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
+++ b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
@@ -7,6 +7,6 @@
"hairpinMode": true,
"ipam": {
"type": "host-local",
- "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) }}"
+ "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
}
}
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 54251caf..9ca444e8 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,14 +4,15 @@ After=network.target
Requires=kubeguard-interfaces.service
After=kubeguard-interfaces.service
-{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[peer]) -%}
-{% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
+{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%}
+{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
+{% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
{% if direct_zone %}
-{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[inventory_hostname]) %}
-{% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
-{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[peer]) %}
+{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
+{% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
+{% set direct_ip_peer = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %}
{% else %}
-{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[peer]) -%}
+{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%}
{% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
{% set wg_host = hostvars[peer].external_ip | default(hostvars[peer].ansible_default_ipv4.address) -%}
{% set wg_port = hostvars[peer].kubeguard_wireguard_port -%}
@@ -22,7 +23,7 @@ Type=oneshot
{% if direct_zone %}
ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }}
ExecStart=/sbin/ip link set up dev {{ direct_interface }}
-ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }}
+ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }}
ExecStop=/sbin/ip route del {{ pod_net_peer }}
ExecStop=/sbin/ip link set down dev {{ direct_interface }}
ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }}
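As a worked example of the direct-zone branch (values from the k8s-test group vars): on s2-k8s-test0 the unit for peer s2-k8s-test1 in zone 'atlas' would be built from roughly the values sketched below, routing the peer's pod subnet over the direct link with the local bridge address as source.

# hypothetical values on s2-k8s-test0 for peer s2-k8s-test1 (zone 'atlas')
direct_interface: direct0        # direct_net_zones.atlas.node_interface[inventory_hostname]
direct_ip: 172.18.191.1/24       # transfer_net | ipaddr(node_index[inventory_hostname])
direct_ip_peer: 172.18.191.2/24  # transfer_net | ipaddr(node_index[peer])
pod_net_peer: 172.18.2.0/24      # peer's per-node pod subnet
pod_ip_self: 172.18.1.1          # local bridge address, used as the route's src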
diff --git a/roles/kubernetes/standalone/templates/kubelet.service.override.j2 b/roles/kubernetes/standalone/templates/kubelet.service.override.j2
index 3a88ccd2..75061e73 100644
--- a/roles/kubernetes/standalone/templates/kubelet.service.override.j2
+++ b/roles/kubernetes/standalone/templates/kubelet.service.override.j2
@@ -6,4 +6,5 @@ ExecStart=/usr/bin/kubelet \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
{% endif %}
+ --network-plugin=cni \
--cloud-provider=
diff --git a/spreadspace/group_vars/k8s-test.yml b/spreadspace/group_vars/k8s-test.yml
new file mode 100644
index 00000000..389020c4
--- /dev/null
+++ b/spreadspace/group_vars/k8s-test.yml
@@ -0,0 +1,10 @@
+$ANSIBLE_VAULT;1.2;AES256;spreadspace
+39376666393934306161383231356136393664373164653834393534623766323637666632313632
+3062623430363230333736643164393064346431346534650a393062613232663264383537396663
+39363838303361353766616264643139373062313437383332656162393536646262363561356264
+3333376139663332340a333036303333356333376630656632303464356261643731356336373337
+37303939363239613130363232646262353238333237633766613035643238356636323563636231
+66336562313963323536623732396534313131373338353136623461663033313534636561356131
+65373264636562336261316231656362333630656334373135633663666465376430303135383562
+33653663653132633834626165383832323235323563323334643830643934346466343762613433
+3463
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
new file mode 100644
index 00000000..b94f8301
--- /dev/null
+++ b/spreadspace/k8s-test.yml
@@ -0,0 +1,17 @@
+---
+- name: Basic Node Setup
+ hosts: k8s-test
+ roles:
+ - role: base
+ - role: sshd
+ - role: zsh
+
+- import_playbook: ../common/kubernetes-cluster-layout.yml
+ vars:
+ kubernetes_cluster_layout:
+ nodes_group: k8s-test
+ masters:
+ - s2-k8s-test0
+
+- import_playbook: ../common/kubernetes.yml
+- import_playbook: ../common/kubernetes-cleanup.yml
diff --git a/spreadspace/s2-k8s-test0.yml b/spreadspace/s2-k8s-test0.yml
new file mode 100644
index 00000000..50b75938
--- /dev/null
+++ b/spreadspace/s2-k8s-test0.yml
@@ -0,0 +1,7 @@
+---
+- name: Basic Setup
+ hosts: s2-k8s-test0
+ roles:
+ - role: base
+ - role: sshd
+ - role: zsh
diff --git a/spreadspace/s2-k8s-test1.yml b/spreadspace/s2-k8s-test1.yml
new file mode 100644
index 00000000..6050d269
--- /dev/null
+++ b/spreadspace/s2-k8s-test1.yml
@@ -0,0 +1,7 @@
+---
+- name: Basic Setup
+ hosts: s2-k8s-test1
+ roles:
+ - role: base
+ - role: sshd
+ - role: zsh
diff --git a/spreadspace/s2-k8s-test2.yml b/spreadspace/s2-k8s-test2.yml
new file mode 100644
index 00000000..87440a81
--- /dev/null
+++ b/spreadspace/s2-k8s-test2.yml
@@ -0,0 +1,7 @@
+---
+- name: Basic Setup
+ hosts: s2-k8s-test2
+ roles:
+ - role: base
+ - role: sshd
+ - role: zsh
diff --git a/spreadspace/s2-k8s-test3.yml b/spreadspace/s2-k8s-test3.yml
new file mode 100644
index 00000000..5c94db94
--- /dev/null
+++ b/spreadspace/s2-k8s-test3.yml
@@ -0,0 +1,7 @@
+---
+- name: Basic Setup
+ hosts: s2-k8s-test3
+ roles:
+ - role: base
+ - role: sshd
+ - role: zsh
diff --git a/spreadspace/s2-k8s-test4.yml b/spreadspace/s2-k8s-test4.yml
new file mode 100644
index 00000000..5cf93983
--- /dev/null
+++ b/spreadspace/s2-k8s-test4.yml
@@ -0,0 +1,7 @@
+---
+- name: Basic Setup
+ hosts: s2-k8s-test4
+ roles:
+ - role: base
+ - role: sshd
+ - role: zsh