From dc15a3435465bbeeff4ccc411bb39b5b5b974fd8 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Mon, 6 Jan 2020 04:30:35 +0100
Subject: added k8s-test hosts

---
 roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service | 2 +-
 roles/kubernetes/net/kubeguard/meta/main.yml                      | 4 ----
 roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2           | 4 ++--
 roles/kubernetes/net/kubeguard/templates/k8s.json.j2              | 2 +-
 .../kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2  | 8 ++++----
 5 files changed, 8 insertions(+), 12 deletions(-)
 delete mode 100644 roles/kubernetes/net/kubeguard/meta/main.yml

diff --git a/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service b/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
index f45df88a..35fc8f90 100644
--- a/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
+++ b/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
@@ -1,5 +1,5 @@
 [Unit]
-Description=Kubernetes Network Interfaces
+Description=Kubeguard Network Setup
 After=network.target
 
 [Service]
diff --git a/roles/kubernetes/net/kubeguard/meta/main.yml b/roles/kubernetes/net/kubeguard/meta/main.yml
deleted file mode 100644
index 39c7d694..00000000
--- a/roles/kubernetes/net/kubeguard/meta/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-dependencies:
-- role: wireguard/base
-  when: kubeguard_remove_node is not defined
diff --git a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2 b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
index 87849ee9..9c2d8a63 100644
--- a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
+++ b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
@@ -8,14 +8,14 @@
 INET_IF="{{ ansible_default_ipv4.interface }}"
 
 POD_NET_CIDR="{{ kubernetes.pod_ip_range }}"
 
-{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) -%}
+{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[inventory_hostname]) -%}
 BR_IF="kube-br0"
 BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}"
 BR_IP_CIDR="{{ br_net | ipaddr(1) }}"
 BR_NET_CIDR="{{ br_net }}"
 
 TUN_IF="kube-wg0"
-TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[inventory_hostname]) }}"
+TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.host_index[inventory_hostname]) }}"
 
 case "$1" in
diff --git a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2 b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
index f457ed1c..62900c6a 100644
--- a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
+++ b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
@@ -7,6 +7,6 @@
   "hairpinMode": true,
   "ipam": {
     "type": "host-local",
-    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) }}"
+    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[inventory_hostname]) }}"
   }
 }
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 54251caf..1bbb3b72 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,14 +4,14 @@ After=network.target
 Requires=kubeguard-interfaces.service
 After=kubeguard-interfaces.service
 
-{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[peer]) -%}
+{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[peer]) -%}
 {% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
 {% if direct_zone %}
-{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[inventory_hostname]) %}
+{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.host_index[inventory_hostname]) %}
 {% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
-{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[peer]) %}
+{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.host_index[peer]) %}
 {% else %}
-{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[peer]) -%}
+{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.host_index[peer]) -%}
 {% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
 {% set wg_host = hostvars[peer].external_ip | default(hostvars[peer].ansible_default_ipv4.address) -%}
 {% set wg_port = hostvars[peer].kubeguard_wireguard_port -%}
--
cgit v1.2.3

From 949ff8a513464f107ebd2c474078b452e129acf3 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Sat, 11 Jan 2020 01:35:42 +0100
Subject: kubernetes: major refactoring of kubernetes playbook structure

---
 common/kubernetes.yml                              | 212 +++++++++++++--------
 inventory/group_vars/k8s-test/main.yml             |  15 +-
 roles/kubernetes/base/tasks/cri_containerd.yml     |   4 +
 roles/kubernetes/base/tasks/cri_docker.yml         |   8 +
 roles/kubernetes/base/tasks/main.yml               |   3 +
 roles/kubernetes/kubeadm/base/tasks/main.yml       |   3 +-
 roles/kubernetes/net/kubeguard/tasks/add.yml       |   4 +
 .../net/kubeguard/templates/ifupdown.sh.j2         |   4 +-
 .../kubernetes/net/kubeguard/templates/k8s.json.j2 |   2 +-
 .../kubeguard/templates/kubeguard-peer.service.j2  |   8 +-
 spreadspace/k8s-test.yml                           |  14 ++
 spreadspace/s2-k8s-test.yml                        |   2 -
 12 files changed, 181 insertions(+), 98 deletions(-)
 create mode 100644 roles/kubernetes/base/tasks/cri_containerd.yml
 create mode 100644 roles/kubernetes/base/tasks/cri_docker.yml
 create mode 100644 spreadspace/k8s-test.yml
 delete mode 100644 spreadspace/s2-k8s-test.yml

diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index 311f3ebd..67f2dd68 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -1,101 +1,149 @@
 ---
+- name: create host groups
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: sanity check - fail if masters are not included in nodes
+    assert:
+      msg: "kubernetes_cluster_layout.nodes must include all nodes (master and non-master)"
+      that: kubernetes_cluster_layout.masters | difference(kubernetes_cluster_layout.nodes) | length == 0
+
+  - name: sanity check - fail if primary master is not in masters
+    when: kubernetes_cluster_layout.primary_master is defined
+    assert:
+      msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
+      that: kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters
+
+  - name: sanity check - fail on multiple masters if no primary master is configured
+    assert:
+      msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
+      that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
+
+  - name: create group for all kubernetes nodes
+    loop: "{{ kubernetes_cluster_layout.nodes }}"
+    add_host:
+      name: "{{ item }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_nodes_
+    changed_when: False
+
+  - name: create group for kubernetes master nodes
+    loop: "{{ kubernetes_cluster_layout.masters }}"
+    add_host:
+      name: "{{ item }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_masters_
+    changed_when: False
+
+  - name: create group for kubernetes primary master
+    add_host:
+      name: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_primary_master_
+    changed_when: False
+
 - name: prepare variables and do some sanity checks
   hosts: _kubernetes_nodes_
   gather_facts: no
   run_once: yes
   tasks:
-  - name: check if master group contains only one node
-    fail:
-      msg: "There must be exactly one master node defined"
-    failed_when: (groups['_kubernetes_masters_'] | length) != 1
-
-  - name: setup variables
-    set_fact:
-      kubernetes_nodes: "{{ groups['_kubernetes_nodes_'] }}"
-      kubernetes_master: "{{ groups['_kubernetes_masters_'] | first }}"
-
-  - name: check whether every node has a net_index assigned
-    fail:
-      msg: "There are nodes without an assigned net-index: {{ kubernetes_nodes | difference(kubernetes.net_index.keys()) | join(', ') }}"
-    failed_when: kubernetes_nodes | difference(kubernetes.net_index.keys()) | length > 0
-
-  - name: check whether net indizes are unique
-    fail:
-      msg: "There are duplicate entries in the net_index table, every net-index is only allowed once"
-    failed_when: (kubernetes.net_index.keys() | length) != (kubernetes.net_index.values() | unique | length)
-
-  - name: check whether net indizes are all > 0
-    fail:
-      msg: "At least one net-index is < 1 (indizes start at 1)"
-    failed_when: (kubernetes.net_index.values() | min) < 1
-
-  - name: disable bridge and iptables in docker daemon config
-    set_fact:
-      docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+  - name: sanity checks for kubeguard
+    when: kubernetes.network_plugin == 'kubeguard'
+    block:
+    - name: check whether every node has a node_index assigned
+      assert:
+        msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
+        that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0
+
+    - name: check whether node indizes are unique
+      assert:
+        msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
+        that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)
+
+    - name: check whether node indizes are all > 0
+      assert:
+        msg: "At least one node_index is < 1 (indizes start at 1)"
+        that: (kubeguard.node_index.values() | min) > 0
+
+  - name: make sure the kubernetes_cri_socket variable is configured correctly
+    when: kubernetes.container_runtime == 'containerd'
+    assert:
+      msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
+      that:
+      - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"
 
 ########
-- name: install kubernetes and overlay network
+- name: kubernetes base installation
   hosts: _kubernetes_nodes_
   roles:
-  - role: docker
   - role: kubernetes/net/kubeguard
+    when: kubernetes.network_plugin == 'kubeguard'
   - role: kubernetes/base
   - role: kubernetes/kubeadm/base
 
-- name: configure kubernetes master
-  hosts: _kubernetes_masters_
-  roles:
-  - role: kubernetes/kubeadm/master
+# - name: configure kubernetes primary master
+#   hosts: _kubernetes_primary_master_
+#   roles:
+#   - role: kubernetes/kubeadm/master/common
+#   - role: kubernetes/kubeadm/master/primary
 
-- name: configure kubernetes non-master nodes
-  hosts: _kubernetes_nodes_:!_kubernetes_masters_
-  roles:
-  - role: kubernetes/kubeadm/node
+# - name: configure kubernetes secondary masters
+#   hosts: _kubernetes_masters_:!_kubernetes_primary_master_
+#   roles:
+#   - role: kubernetes/kubeadm/master/common
+#   - role: kubernetes/kubeadm/master/secondary
+
+# - name: configure kubernetes non-master nodes
+#   hosts: _kubernetes_nodes_:!_kubernetes_masters_
+#   roles:
+#   - role: kubernetes/kubeadm/node
 
 ########
-- name: check for nodes to be removed
-  hosts: _kubernetes_masters_
-  tasks:
-  - name: fetch list of current nodes
-    command: kubectl get nodes -o name
-    changed_when: False
-    check_mode: no
-    register: kubectl_node_list
-
-  - name: generate list of nodes to be removed
-    loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
-    add_host:
-      name: "{{ item }}"
-      inventory_dir: "{{ inventory_dir }}"
-      group: _kubernetes_nodes_remove_
-    changed_when: False
-
-  - name: drain superflous nodes
-    loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
-    command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
-
-- name: try to clean superflous nodes
-  hosts: _kubernetes_nodes_remove_
-  roles:
-  - role: kubernetes/kubeadm/reset
-  - role: kubernetes/net/kubeguard
-    vars:
-      kubeguard_remove_node: yes
-
-- name: remove node from api server
-  hosts: _kubernetes_masters_
-  tasks:
-  - name: remove superflous nodes
-    loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
-    command: "kubectl delete node {{ item }}"
-
-  - name: wait a litte before removing bootstrap-token so new nodes have time to generate certificates for themselves
-    when: kube_bootstrap_token != ""
-    pause:
-      seconds: 42
-
-  - name: remove bootstrap-token
-    when: kube_bootstrap_token != ""
-    command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
+# - name: check for nodes to be removed
+#   hosts: _kubernetes_primary_master_
+#   tasks:
+#   - name: fetch list of current nodes
+#     command: kubectl get nodes -o name
+#     changed_when: False
+#     check_mode: no
+#     register: kubectl_node_list
+
+#   - name: generate list of nodes to be removed
+#     loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
+#     add_host:
+#       name: "{{ item }}"
+#       inventory_dir: "{{ inventory_dir }}"
+#       group: _kubernetes_nodes_remove_
+#     changed_when: False

+#   - name: drain superflous nodes
+#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+#     command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+
+# - name: try to clean superflous nodes
+#   hosts: _kubernetes_nodes_remove_
+#   roles:
+#   - role: kubernetes/kubeadm/reset
+#   - role: kubernetes/net/kubeguard
+#     when: kubernetes.network_plugin == 'kubeguard'
+#     vars:
+#       kubeguard_remove_node: yes
+
+# - name: remove node from api server
+#   hosts: _kubernetes_primary_master_
+#   tasks:
+#   - name: remove superflous nodes
+#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+#     command: "kubectl delete node {{ item }}"
+
+#   - name: wait a litte before removing bootstrap-token so new nodes have time to generate certificates for themselves
+#     when: kube_bootstrap_token != ""
+#     pause:
+#       seconds: 42
+
+#   - name: remove bootstrap-token
+#     when: kube_bootstrap_token != ""
+#     command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
 
 ### TODO: add node labels (ie. for ingress daeomnset)
diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index a28cba9c..7e01d0ab 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -8,26 +8,25 @@ kubernetes:
   container_runtime: containerd
   network_plugin: kubeguard
 
-  dedicated_master: True
-  api_advertise_ip: 144.76.160.141
+  dedicated_master: False
+  api_advertise_ip: 89.106.215.23
   api_extra_sans:
-  - k8s-test.chaos-at-home.org
+  - k8s-test.spreadspace.org
 
   pod_ip_range: 172.18.0.0/16
   pod_ip_range_size: 24
   service_ip_range: 172.18.192.0/18
 
-
 kubeguard:
   kube_router_version: 0.4.0-rc1
 
-  ## host_index must be in the range between 1 and 190 -> 189 hosts possible
+  ## node_index must be in the range between 1 and 190 -> 189 hosts possible
   ##
   ## hardcoded hostnames are not nice but if we do this via host_vars
   ## the info is spread over multiple files and this makes it more diffcult
   ## to find mistakes, so it is nicer to keep it in one place...
-  host_index:
+  node_index:
     s2-k8s-test0: 1
    s2-k8s-test1: 2
    s2-k8s-test2: 3
@@ -40,3 +39,7 @@ kubeguard:
     node_interface:
       s2-k8s-test0: direct0
       s2-k8s-test1: direct0
+
+
+kubernetes_kubelet_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
+kubernetes_cri_socket: "unix:///run/containerd/containerd.sock"
diff --git a/roles/kubernetes/base/tasks/cri_containerd.yml b/roles/kubernetes/base/tasks/cri_containerd.yml
new file mode 100644
index 00000000..aa34e6fe
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_containerd.yml
@@ -0,0 +1,4 @@
+---
+- name: install containerd
+  include_role:
+    name: containerd
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
new file mode 100644
index 00000000..67196f51
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -0,0 +1,8 @@
+---
+- name: disable bridge and iptables in docker daemon config
+  set_fact:
+    docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+
+- name: install docker
+  include_role:
+    name: docker
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 9c91e347..c3ab1c02 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -1,4 +1,7 @@
 ---
+- name: install container runtime
+  include_tasks: "cri_{{ kubernetes_container_runtime }}.yml"
+
 - name: prepare /var/lib/kubelet as LVM
   when: kubelet_lvm is defined
   import_tasks: lvm.yml
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 2d9b9eed..76953498 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -16,10 +16,11 @@
     selection: hold
 
 - name: set kubelet node-ip
+  when: kubernetes_kubelet_node_ip is defined
   lineinfile:
     name: "/etc/default/kubelet"
     regexp: '^KUBELET_EXTRA_ARGS='
-    line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) | ipaddr(1) | ipaddr("address") }}'
+    line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_kubelet_node_ip }}'
 
 - name: add kubectl/kubeadm completion for shells
   loop:
diff --git a/roles/kubernetes/net/kubeguard/tasks/add.yml b/roles/kubernetes/net/kubeguard/tasks/add.yml
index b604302b..2f9391fc 100644
--- a/roles/kubernetes/net/kubeguard/tasks/add.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/add.yml
@@ -1,4 +1,8 @@
 ---
+- name: install wireguard
+  include_role:
+    name: wireguard/base
+
 - name: create network config directory
   file:
     name: /var/lib/kubeguard/
diff --git a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2 b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
index 9c2d8a63..98b38cf4 100644
--- a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
+++ b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
@@ -8,14 +8,14 @@
 INET_IF="{{ ansible_default_ipv4.interface }}"
 
 POD_NET_CIDR="{{ kubernetes.pod_ip_range }}"
 
-{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[inventory_hostname]) -%}
+{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%}
 BR_IF="kube-br0"
 BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}"
 BR_IP_CIDR="{{ br_net | ipaddr(1) }}"
 BR_NET_CIDR="{{ br_net }}"
 
 TUN_IF="kube-wg0"
-TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.host_index[inventory_hostname]) }}"
+TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}"
 
 case "$1" in
diff --git a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2 b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
index 62900c6a..65b1357a 100644
--- a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
+++ b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
@@ -7,6 +7,6 @@
   "hairpinMode": true,
   "ipam": {
     "type": "host-local",
-    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[inventory_hostname]) }}"
+    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
   }
 }
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 1bbb3b72..48feb8ba 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,14 +4,14 @@ After=network.target
 Requires=kubeguard-interfaces.service
 After=kubeguard-interfaces.service
 
-{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[peer]) -%}
+{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
 {% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
 {% if direct_zone %}
-{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.host_index[inventory_hostname]) %}
+{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
 {% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
-{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.host_index[peer]) %}
+{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %}
 {% else %}
-{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.host_index[peer]) -%}
+{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%}
 {% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
 {% set wg_host = hostvars[peer].external_ip | default(hostvars[peer].ansible_default_ipv4.address) -%}
 {% set wg_port = hostvars[peer].kubeguard_wireguard_port -%}
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
new file mode 100644
index 00000000..50f4ccac
--- /dev/null
+++ b/spreadspace/k8s-test.yml
@@ -0,0 +1,14 @@
+---
+- name: cluster layout
+  hosts: localhost
+  gather_facts: no
+  run_once: yes
+  tasks:
+  - name: configure cluster layout
+    set_fact:
+      kubernetes_cluster_layout:
+        nodes: "{{ groups['k8s-test'] }}"
+        masters:
+        - s2-k8s-test0
+
+- import_playbook: ../common/kubernetes.yml
diff --git a/spreadspace/s2-k8s-test.yml b/spreadspace/s2-k8s-test.yml
deleted file mode 100644
index aa80d40b..00000000
--- a/spreadspace/s2-k8s-test.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-## TODO: implement me!
--
cgit v1.2.3

From 25fb295600a5c24ca0e0c7150cb4cacfbd598718 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Sat, 11 Jan 2020 02:28:04 +0100
Subject: kubeguard works now

---
 common/kubernetes.yml                                            | 6 +++---
 roles/kubernetes/net/kubeguard/defaults/main.yml                 | 2 ++
 roles/kubernetes/net/kubeguard/tasks/add.yml                     | 6 +++---
 roles/kubernetes/net/kubeguard/tasks/main.yml                    | 9 ++-------
 .../kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 | 8 ++++----
 5 files changed, 14 insertions(+), 17 deletions(-)
 create mode 100644 roles/kubernetes/net/kubeguard/defaults/main.yml

diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index 67f2dd68..45d7cc5d 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -79,8 +79,8 @@
   roles:
   - role: kubernetes/net/kubeguard
     when: kubernetes.network_plugin == 'kubeguard'
-  - role: kubernetes/base
-  - role: kubernetes/kubeadm/base
+  # - role: kubernetes/base
+  # - role: kubernetes/kubeadm/base
 
 # - name: configure kubernetes primary master
 #   hosts: _kubernetes_primary_master_
 #   roles:
 #   - role: kubernetes/kubeadm/master/common
 #   - role: kubernetes/kubeadm/master/primary
@@ -128,7 +128,7 @@
 #   - role: kubernetes/net/kubeguard
 #     when: kubernetes.network_plugin == 'kubeguard'
 #     vars:
-#       kubeguard_remove_node: yes
+#       kubeguard_action: remove
diff --git a/roles/kubernetes/net/kubeguard/defaults/main.yml b/roles/kubernetes/net/kubeguard/defaults/main.yml
new file mode 100644
index 00000000..acabaa25
--- /dev/null
+++ b/roles/kubernetes/net/kubeguard/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+kubeguard_action: add
diff --git a/roles/kubernetes/net/kubeguard/tasks/add.yml b/roles/kubernetes/net/kubeguard/tasks/add.yml
index 2f9391fc..0658b42c 100644
--- a/roles/kubernetes/net/kubeguard/tasks/add.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/add.yml
@@ -1,6 +1,6 @@
 ---
 - name: install wireguard
-  include_role:
+  import_role:
     name: wireguard/base
 
 - name: create network config directory
@@ -52,7 +52,7 @@
 
 - name: compute list of peers to be added
   set_fact:
-    kubeguard_peers_to_add: "{{ kubernetes_nodes | difference(inventory_hostname) }}"
+    kubeguard_peers_to_add: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
 
 - name: compute list of peers to be removed
   set_fact:
@@ -91,7 +91,7 @@
 - name: enable IPv4 forwarding
   sysctl:
     name: net.ipv4.ip_forward
-    value: 1
+    value: '1'
     sysctl_set: yes
     state: present
     reload: yes
diff --git a/roles/kubernetes/net/kubeguard/tasks/main.yml b/roles/kubernetes/net/kubeguard/tasks/main.yml
index 0e87af11..10b0d547 100644
--- a/roles/kubernetes/net/kubeguard/tasks/main.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/main.yml
@@ -1,8 +1,3 @@
 ---
-- name: add node to overlay network
-  include_tasks: add.yml
-  when: kubeguard_remove_node is not defined
-
-- name: remove node from overlay network
-  include_tasks: remove.yml
-  when: kubeguard_remove_node is defined
+- name: add/remove nodes to overlay network
+  include_tasks: "{{ kubeguard_action }}.yml"
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 48feb8ba..6f36b571 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -5,11 +5,11 @@ Requires=kubeguard-interfaces.service
 After=kubeguard-interfaces.service
 
 {% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
-{% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
+{% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
 {% if direct_zone %}
-{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
-{% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
-{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %}
+{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
+{% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
+{% set direct_ip_peer = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %}
 {% else %}
 {% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%}
 {% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
--
cgit v1.2.3

From 8010f57a73885f7abb5c98c1f77c49baa59a7d16 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Fri, 17 Jan 2020 22:24:09 +0100
Subject: kubernetes: multi master cluster works now

---
 inventory/group_vars/k8s-test/main.yml             |  3 +--
 .../kubeadm/master/tasks/primary-master.yml        | 30 ++++++++++++----------
 .../kubeadm/master/tasks/secondary-masters.yml     | 27 ++++++++++---------
 .../kubeadm/master/templates/kubeadm.config.j2     | 11 +++++---
 roles/kubernetes/kubeadm/node/tasks/main.yml       | 25 ++++++++++--------
 .../kubeguard/templates/kubeguard-peer.service.j2  |  3 ++-
 spreadspace/k8s-test.yml                           |  3 +++
 7 files changed, 60 insertions(+), 42 deletions(-)

diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index 0d4d0857..b5863ad1 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -14,6 +14,7 @@ kubernetes:
   dedicated_master: False
 
   api_extra_sans:
+  - 89.106.215.23
   - k8s-test.spreadspace.org
 
   pod_ip_range: 172.18.0.0/16
@@ -25,8 +26,6 @@ kubernetes:
 
 
 kubeguard:
-  kube_router_version: 0.4.0-rc1
-
   ## node_index must be in the range between 1 and 190 -> 189 hosts possible
   ##
   ## hardcoded hostnames are not nice but if we do this via host_vars
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index e814e847..115c8616 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -24,35 +24,39 @@
 #    check_mode: no
 #    register: kubeadm_token_generate
 
-  - name: initialize kubernetes master
-    command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
-#    command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
-    args:
-      creates: /etc/kubernetes/pki/ca.crt
-    register: kubeadm_init
-
-  - name: dump output of kubeadm init to log file
-    when: kubeadm_init.changed
-    copy:
-      content: "{{ kubeadm_init.stdout }}\n"
-      dest: /etc/kubernetes/kubeadm-init.log
+  - name: initialize kubernetes master and store log
+    block:
+    - name: initialize kubernetes master
+      command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+      # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+      args:
+        creates: /etc/kubernetes/pki/ca.crt
+      register: kubeadm_init
+
+    always:
+    - name: dump output of kubeadm init to log file
+      when: kubeadm_init.changed
+      copy:
+        content: "{{ kubeadm_init.stdout }}\n"
+        dest: /etc/kubernetes/kubeadm-init.log
 
   - name: create bootstrap token for existing cluster
     command: kubeadm token create --ttl 42m
     check_mode: no
     register: kubeadm_token_generate
 
+
 ### cluster is already initialized but config has changed
 - name: upgrade cluster config
   when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed
   block:
-
   - name: fail for cluster upgrades
     fail:
       msg: "upgrading cluster config is currently not supported!"
 
+
 ### cluster is already initialized
 - name: prepare cluster for new nodes
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index 7025ace0..ffe1b4b2 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -25,18 +25,21 @@
   set_fact:
     kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
 
-- name: join kubernetes secondary master node
-  command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
-  args:
-    creates: /etc/kubernetes/kubelet.conf
-  register: kubeadm_join
-
-- name: dump output of kubeadm join to log file
-  when: kubeadm_join is changed
-  # This is not a handler by design to make sure this action runs at this point of the play.
-  copy:  # noqa 503
-    content: "{{ kubeadm_join.stdout }}\n"
-    dest: /etc/kubernetes/kubeadm-join.log
+- name: join kubernetes secondary master node and store log
+  block:
+  - name: join kubernetes secondary master node
+    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_kubelet_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_kubelet_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+    args:
+      creates: /etc/kubernetes/kubelet.conf
+    register: kubeadm_join
+
+  always:
+  - name: dump output of kubeadm join to log file
+    when: kubeadm_join is changed
+    # This is not a handler by design to make sure this action runs at this point of the play.
+    copy:  # noqa 503
+      content: "{{ kubeadm_join.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-join.log
 
 # TODO: acutally check if node has registered
 - name: give the new master(s) a moment to register
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
index 3c10e59b..869c809f 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -1,4 +1,4 @@
-{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #}
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
 {# #}
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: InitConfiguration
@@ -6,20 +6,25 @@ kind: InitConfiguration
 {# better control it's lifetime #}
 bootstrapTokens:
 - ttl: "1s"
+{% if kubernetes_kubelet_node_ip is defined %}
+localAPIEndpoint:
+  advertiseAddress: {{ kubernetes_kubelet_node_ip }}
+{% endif %}
 ---
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: ClusterConfiguration
 kubernetesVersion: {{ kubernetes_version }}
 clusterName: {{ kubernetes.cluster_name }}
 imageRepository: k8s.gcr.io
+{% if kubernetes_kubelet_node_ip is defined %}
 controlPlaneEndpoint: "{{ kubernetes_kubelet_node_ip }}:6443"
+{% endif %}
 networking:
   dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }}
   podSubnet: {{ kubernetes.pod_ip_range }}
   serviceSubnet: {{ kubernetes.service_ip_range }}
 apiServer:
-  extraArgs:
-    advertise-address: {{ kubernetes_kubelet_node_ip }}
+  #extraArgs:
 #    encryption-provider-config: /etc/kubernetes/encryption/config
 #  extraVolumes:
 #  - name: encryption-config
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index f7efdd81..61d47111 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -1,13 +1,16 @@
 ---
-- name: join kubernetes node
-  command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
-  args:
-    creates: /etc/kubernetes/kubelet.conf
-  register: kubeadm_join
-
-- name: dump output of kubeadm join to log file
-  when: kubeadm_join is changed
-  # This is not a handler by design to make sure this action runs at this point of the play.
-  copy:  # noqa 503
-    content: "{{ kubeadm_join.stdout }}\n"
-    dest: /etc/kubernetes/kubeadm-join.log
+- name: join kubernetes node and store log
+  block:
+  - name: join kubernetes node
+    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+    args:
+      creates: /etc/kubernetes/kubelet.conf
+    register: kubeadm_join
+
+  always:
+  - name: dump output of kubeadm join to log file
+    when: kubeadm_join is changed
+    # This is not a handler by design to make sure this action runs at this point of the play.
+    copy:  # noqa 503
+      content: "{{ kubeadm_join.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-join.log
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 6f36b571..9ca444e8 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,6 +4,7 @@ After=network.target
 Requires=kubeguard-interfaces.service
 After=kubeguard-interfaces.service
 
+{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%}
 {% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
 {% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
 {% if direct_zone %}
@@ -22,7 +23,7 @@ Type=oneshot
 {% if direct_zone %}
ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }}
 ExecStart=/sbin/ip link set up dev {{ direct_interface }}
-ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }}
+ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }}
 ExecStop=/sbin/ip route del {{ pod_net_peer }}
 ExecStop=/sbin/ip link set down dev {{ direct_interface }}
 ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }}
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
index 97daa5b0..f21b3fae 100644
--- a/spreadspace/k8s-test.yml
+++ b/spreadspace/k8s-test.yml
@@ -12,6 +12,9 @@
         nodes_group: k8s-test
         masters:
         - s2-k8s-test0
+        - s2-k8s-test1
+        - s2-k8s-test2
+        primary_master: s2-k8s-test0
 
 - import_playbook: ../common/kubernetes.yml
 - import_playbook: ../common/kubernetes-cleanup.yml
--
cgit v1.2.3
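
Note: kubeguard-peer.service.j2 in the patches above calls a custom `direct_net_zone` Jinja2 filter, and the filter plugin that defines it is not part of any commit shown here. Inferred from how the template uses it — it must return the name of the zone whose node_interface map contains both hosts, and something falsy when the two hosts share no direct link — a minimal sketch of such a plugin could look like the following. This is an illustrative assumption only; the file name and implementation details are guesses, and the plugin actually shipped in the repository may differ.

# filter_plugins/direct_net_zone.py -- hypothetical sketch, inferred from the
# template usage in kubeguard-peer.service.j2; not taken from the patches above.

def direct_net_zone(zones, host, peer):
    # A zone qualifies if both hosts appear in its node_interface map; return
    # its name so the template can look up transfer_net and node_interface.
    for name, zone in (zones or {}).items():
        interfaces = zone.get('node_interface', {})
        if host in interfaces and peer in interfaces:
            return name
    # An empty string is falsy in Jinja2, so `{% if direct_zone %}` falls
    # through to the wireguard tunnel branch when there is no direct link.
    return ''


class FilterModule(object):
    def filters(self):
        return {'direct_net_zone': direct_net_zone}

Returning an empty string rather than None would keep the template simple: the same value works both as a boolean test and, once the test has passed, as the dictionary key into direct_net_zones.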