From dc15a3435465bbeeff4ccc411bb39b5b5b974fd8 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Mon, 6 Jan 2020 04:30:35 +0100
Subject: added k8s-test hosts

---
 inventory/group_vars/k8s-test-2019vm/main.yml | 31 ++++++++++++++++++++
 inventory/group_vars/k8s-test-atlas/main.yml  | 34 ++++++++++++++++++++++
 inventory/group_vars/k8s-test/main.yml        | 42 ++++++++++++++++++++++++++-
 inventory/host_vars/ch-atlas.yml              |  3 ++
 inventory/host_vars/sk-2019vm.yml             |  4 +++
 inventory/hosts.ini                           | 18 ++++++++++++
 6 files changed, 131 insertions(+), 1 deletion(-)
 create mode 100644 inventory/group_vars/k8s-test-2019vm/main.yml
 create mode 100644 inventory/group_vars/k8s-test-atlas/main.yml

(limited to 'inventory')

diff --git a/inventory/group_vars/k8s-test-2019vm/main.yml b/inventory/group_vars/k8s-test-2019vm/main.yml
new file mode 100644
index 00000000..2cbe5be1
--- /dev/null
+++ b/inventory/group_vars/k8s-test-2019vm/main.yml
@@ -0,0 +1,31 @@
+---
+vm_host: sk-2019vm
+
+install:
+  host: "{{ vm_host }}"
+  mem: 1024
+  numcpu: 1
+  disks:
+    primary: /dev/sda
+    scsi:
+      sda:
+        type: zfs
+        pool: storage
+        name: "{{ inventory_hostname }}"
+        size: 5g
+  interfaces:
+    - bridge: br-public
+      name: primary0
+  autostart: False
+
+network:
+  nameservers: "{{ hostvars[vm_host].vm_host.network.dns }}"
+  domain: "{{ host_domain }}"
+  systemd_link:
+    interfaces: "{{ install.interfaces }}"
+  primary:
+    interface: primary0
+    ip: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr(hostvars[vm_host].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+    mask: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+    gateway: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr('address') }}"
+#    overlay: "{{ (hostvars[vm_host].vm_host.network.bridges.public.overlay.prefix | ipaddr(hostvars[vm_host].vm_host.network.bridges.public.overlay.offsets[inventory_hostname])).split('/')[0] }}"
diff --git a/inventory/group_vars/k8s-test-atlas/main.yml b/inventory/group_vars/k8s-test-atlas/main.yml
new file mode 100644
index 00000000..3f4fd2fa
--- /dev/null
+++ b/inventory/group_vars/k8s-test-atlas/main.yml
@@ -0,0 +1,34 @@
+---
+apt_repo_provider: ffgraz
+
+vm_host: ch-atlas
+
+install:
+  host: "{{ vm_host }}"
+  mem: 1024
+  numcpu: 1
+  disks:
+    primary: /dev/sda
+    scsi:
+      sda:
+        type: lvm
+        vg: "{{ hostvars[vm_host].host_name }}"
+        lv: "{{ inventory_hostname }}"
+        size: 5g
+  interfaces:
+    - bridge: br-public
+      name: primary0
+    - bridge: br-k8stest
+      name: direct0
+  autostart: True
+
+network:
+  nameservers: "{{ hostvars[vm_host].vm_host.network.dns }}"
+  domain: "{{ host_domain }}"
+  systemd_link:
+    interfaces: "{{ install.interfaces }}"
+  primary:
+    interface: primary0
+    ip: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr(hostvars[vm_host].vm_host.network.bridges.public.offsets[inventory_hostname]) | ipaddr('address') }}"
+    mask: "{{ hostvars[vm_host].vm_host.network.bridges.public.prefix | ipaddr('netmask') }}"
+    gateway: "{{ hostvars[vm_host].vm_host.network.bridges.public.gateway }}"
diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index 7e5cbe2e..a28cba9c 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -1,2 +1,42 @@
 ---
-zsh_banner: chaos-at-home
+kubernetes_version: 1.16.4
+
+kubernetes:
+  cluster_name: k8s-test
+  version: "{{ kubernetes_version }}"
+
+  container_runtime: containerd
+  network_plugin: kubeguard
+
+  dedicated_master: True
+  api_advertise_ip: 144.76.160.141
+  api_extra_sans:
+    - k8s-test.chaos-at-home.org
+
+  pod_ip_range: 172.18.0.0/16
+  pod_ip_range_size: 24
+  service_ip_range: 172.18.192.0/18
+
+
+kubeguard:
+  kube_router_version: 0.4.0-rc1
+
+  ## host_index must be in the range between 1 and 190 -> 189 hosts possible
+  ##
+  ## hardcoded hostnames are not nice but if we do this via host_vars
+  ## the info is spread over multiple files and this makes it more difficult
+  ## to find mistakes, so it is nicer to keep it in one place...
+  host_index:
+    s2-k8s-test0: 1
+    s2-k8s-test1: 2
+    s2-k8s-test2: 3
+    s2-k8s-test3: 4
+    s2-k8s-test4: 5
+
+  direct_net_zones:
+    atlas:
+      transfer_net: 172.18.191.0/24
+      node_interface:
+        s2-k8s-test0: direct0
+        s2-k8s-test1: direct0
diff --git a/inventory/host_vars/ch-atlas.yml b/inventory/host_vars/ch-atlas.yml
index e4acf4da..e9771732 100644
--- a/inventory/host_vars/ch-atlas.yml
+++ b/inventory/host_vars/ch-atlas.yml
@@ -15,4 +15,7 @@ vm_host:
       offsets:
         ch-keyserver: 3
         ch-testvm: 4
+        s2-k8s-test0: 7
+        s2-k8s-test1: 8
         r3-vex2: 11
+    k8stest: {}
diff --git a/inventory/host_vars/sk-2019vm.yml b/inventory/host_vars/sk-2019vm.yml
index b2061380..37f9c97d 100644
--- a/inventory/host_vars/sk-2019vm.yml
+++ b/inventory/host_vars/sk-2019vm.yml
@@ -21,6 +21,9 @@ vm_host:
     public:
       prefix: 192.168.250.254/24
      offsets:
+        s2-k8s-test2: 1
+        s2-k8s-test3: 2
+        s2-k8s-test4: 3
         sk-torrent: 136
         ch-mimas: 143
         sk-testvm: 253
@@ -29,6 +32,7 @@ vm_host:
       prefix: 178.63.180.136/29
       offsets:
         sk-torrent: 0
+        s2-k8s-test4: 3
         ch-mimas: 6
         sk-testvm: 7
diff --git a/inventory/hosts.ini b/inventory/hosts.ini
index f6b39010..88a2d2b1 100644
--- a/inventory/hosts.ini
+++ b/inventory/hosts.ini
@@ -92,6 +92,23 @@ s2-thetys host_name=thetys
 s2-dione host_name=dione
 s2-helene host_name=helene
 
+[spreadspace:children]
+k8s-test
+
+
+[k8s-test-atlas]
+s2-k8s-test0 host_name=k8s-test0
+s2-k8s-test1 host_name=k8s-test1
+
+[k8s-test-2019vm]
+s2-k8s-test2 host_name=k8s-test2
+s2-k8s-test3 host_name=k8s-test3
+s2-k8s-test4 host_name=k8s-test4
+
+[k8s-test:children]
+k8s-test-atlas
+k8s-test-2019vm
+
 [emc:vars]
 host_domain=elev8.at
 
@@ -215,6 +232,7 @@ r3-cccamp19-av
 sk-testvm
 sk-torrent
 ch-mimas
+s2-k8s-test[0:4]
 
 [hroot]
--
cgit v1.2.3

From fb72bb4358b71d2f3a7b7ffa433409b275ff2f2f Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Fri, 10 Jan 2020 20:27:41 +0100
Subject: s2-k8s-test(0|1): fixed mac address

---
 inventory/group_vars/k8s-test-atlas/main.yml | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'inventory')

diff --git a/inventory/group_vars/k8s-test-atlas/main.yml b/inventory/group_vars/k8s-test-atlas/main.yml
index 3f4fd2fa..4212cf5e 100644
--- a/inventory/group_vars/k8s-test-atlas/main.yml
+++ b/inventory/group_vars/k8s-test-atlas/main.yml
@@ -18,8 +18,10 @@ install:
   interfaces:
     - bridge: br-public
       name: primary0
+      mac: "{{ '52:54:00' | random_mac(seed=inventory_hostname + '-primary0') }}"
     - bridge: br-k8stest
       name: direct0
+      mac: "{{ '52:54:00' | random_mac(seed=inventory_hostname + '-direct0') }}"
   autostart: True
 
 network:
--
cgit v1.2.3

From 949ff8a513464f107ebd2c474078b452e129acf3 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Sat, 11 Jan 2020 01:35:42 +0100
Subject: kubernetes: major refactoring of kubernetes playbook structure

---
 common/kubernetes.yml                              | 212 +++++++++++++--------
 inventory/group_vars/k8s-test/main.yml             |  15 +-
 roles/kubernetes/base/tasks/cri_containerd.yml     |   4 +
 roles/kubernetes/base/tasks/cri_docker.yml         |   8 +
 roles/kubernetes/base/tasks/main.yml               |   3 +
 roles/kubernetes/kubeadm/base/tasks/main.yml       |   3 +-
 roles/kubernetes/net/kubeguard/tasks/add.yml       |   4 +
 .../net/kubeguard/templates/ifupdown.sh.j2         |   4 +-
 .../kubernetes/net/kubeguard/templates/k8s.json.j2 |   2 +-
 .../kubeguard/templates/kubeguard-peer.service.j2  |   8 +-
 spreadspace/k8s-test.yml                           |  14 ++
 spreadspace/s2-k8s-test.yml                        |   2 -
 12 files changed, 181 insertions(+), 98 deletions(-)
 create mode 100644 roles/kubernetes/base/tasks/cri_containerd.yml
 create mode 100644 roles/kubernetes/base/tasks/cri_docker.yml
 create mode 100644 spreadspace/k8s-test.yml
 delete mode 100644 spreadspace/s2-k8s-test.yml

(limited to 'inventory')

diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index 311f3ebd..67f2dd68 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -1,101 +1,149 @@
 ---
+- name: create host groups
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: sanity check - fail if masters are not included in nodes
+    assert:
+      msg: "kubernetes_cluster_layout.nodes must include all nodes (master and non-master)"
+      that: kubernetes_cluster_layout.masters | difference(kubernetes_cluster_layout.nodes) | length == 0
+
+  - name: sanity check - fail if primary master is not in masters
+    when: kubernetes_cluster_layout.primary_master is defined
+    assert:
+      msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
+      that: kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters
+
+  - name: sanity check - fail on multiple masters if no primary master is configured
+    assert:
+      msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
+      that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
+
+  - name: create group for all kubernetes nodes
+    loop: "{{ kubernetes_cluster_layout.nodes }}"
+    add_host:
+      name: "{{ item }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_nodes_
+    changed_when: False
+
+  - name: create group for kubernetes master nodes
+    loop: "{{ kubernetes_cluster_layout.masters }}"
+    add_host:
+      name: "{{ item }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_masters_
+    changed_when: False
+
+  - name: create group for kubernetes primary master
+    add_host:
+      name: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_primary_master_
+    changed_when: False
+
 - name: prepare variables and do some sanity checks
   hosts: _kubernetes_nodes_
   gather_facts: no
   run_once: yes
   tasks:
-  - name: check if master group contains only one node
-    fail:
-      msg: "There must be exactly one master node defined"
-    failed_when: (groups['_kubernetes_masters_'] | length) != 1
-
-  - name: setup variables
-    set_fact:
-      kubernetes_nodes: "{{ groups['_kubernetes_nodes_'] }}"
-      kubernetes_master: "{{ groups['_kubernetes_masters_'] | first }}"
-
-  - name: check whether every node has a net_index assigned
-    fail:
-      msg: "There are nodes without an assigned net-index: {{ kubernetes_nodes | difference(kubernetes.net_index.keys()) | join(', ') }}"
-    failed_when: kubernetes_nodes | difference(kubernetes.net_index.keys()) | length > 0
-
-  - name: check whether net indices are unique
-    fail:
-      msg: "There are duplicate entries in the net_index table, every net-index is only allowed once"
-    failed_when: (kubernetes.net_index.keys() | length) != (kubernetes.net_index.values() | unique | length)
-
-  - name: check whether net indices are all > 0
-    fail:
-      msg: "At least one net-index is < 1 (indices start at 1)"
-    failed_when: (kubernetes.net_index.values() | min) < 1
-
-  - name: disable bridge and iptables in docker daemon config
-    set_fact:
-      docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+  - name: sanity checks for kubeguard
+    when: kubernetes.network_plugin == 'kubeguard'
+    block:
+    - name: check whether every node has a node_index assigned
+      assert:
+        msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
+        that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0
+
+    - name: check whether node indices are unique
+      assert:
+        msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
+        that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)
+
+    - name: check whether node indices are all > 0
+      assert:
+        msg: "At least one node_index is < 1 (indices start at 1)"
+        that: (kubeguard.node_index.values() | min) > 0
+
+  - name: make sure the kubernetes_cri_socket variable is configured correctly
+    when: kubernetes.container_runtime == 'containerd'
+    assert:
+      msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
+      that:
+      - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"
+
 
 ########
-- name: install kubernetes and overlay network
+- name: kubernetes base installation
   hosts: _kubernetes_nodes_
   roles:
-  - role: docker
   - role: kubernetes/net/kubeguard
+    when: kubernetes.network_plugin == 'kubeguard'
   - role: kubernetes/base
   - role: kubernetes/kubeadm/base
 
-- name: configure kubernetes master
-  hosts: _kubernetes_masters_
-  roles:
-  - role: kubernetes/kubeadm/master
+# - name: configure kubernetes primary master
+#   hosts: _kubernetes_primary_master_
+#   roles:
+#   - role: kubernetes/kubeadm/master/common
+#   - role: kubernetes/kubeadm/master/primary
 
-- name: configure kubernetes non-master nodes
-  hosts: _kubernetes_nodes_:!_kubernetes_masters_
-  roles:
-  - role: kubernetes/kubeadm/node
+# - name: configure kubernetes secondary masters
+#   hosts: _kubernetes_masters_:!_kubernetes_primary_master_
+#   roles:
+#   - role: kubernetes/kubeadm/master/common
+#   - role: kubernetes/kubeadm/master/secondary
+
+# - name: configure kubernetes non-master nodes
+#   hosts: _kubernetes_nodes_:!_kubernetes_masters_
+#   roles:
+#   - role: kubernetes/kubeadm/node
 
-########
-- name: check for nodes to be removed
-  hosts: _kubernetes_masters_
-  tasks:
-  - name: fetch list of current nodes
-    command: kubectl get nodes -o name
-    changed_when: False
-    check_mode: no
-    register: kubectl_node_list
-
-  - name: generate list of nodes to be removed
-    loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
-    add_host:
-      name: "{{ item }}"
-      inventory_dir: "{{ inventory_dir }}"
-      group: _kubernetes_nodes_remove_
-    changed_when: False
-
-  - name: drain superfluous nodes
-    loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
-    command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
-
-- name: try to clean superfluous nodes
-  hosts: _kubernetes_nodes_remove_
-  roles:
-  - role: kubernetes/kubeadm/reset
-  - role: kubernetes/net/kubeguard
-    vars:
-      kubeguard_remove_node: yes
-
-- name: remove node from api server
-  hosts: _kubernetes_masters_
-  tasks:
-  - name: remove superfluous nodes
-    loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
-    command: "kubectl delete node {{ item }}"
-
-  - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
-    when: kube_bootstrap_token != ""
-    pause:
-      seconds: 42
-
-  - name: remove bootstrap-token
-    when: kube_bootstrap_token != ""
-    command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
+########
+# - name: check for nodes to be removed
+#   hosts: _kubernetes_primary_master_
+#   tasks:
+#   - name: fetch list of current nodes
+#     command: kubectl get nodes -o name
+#     changed_when: False
+#     check_mode: no
+#     register: kubectl_node_list
+
+#   - name: generate list of nodes to be removed
+#     loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
+#     add_host:
+#       name: "{{ item }}"
+#       inventory_dir: "{{ inventory_dir }}"
+#       group: _kubernetes_nodes_remove_
+#     changed_when: False
+
+#   - name: drain superfluous nodes
+#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+#     command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+
+# - name: try to clean superfluous nodes
+#   hosts: _kubernetes_nodes_remove_
+#   roles:
+#   - role: kubernetes/kubeadm/reset
+#   - role: kubernetes/net/kubeguard
+#     when: kubernetes.network_plugin == 'kubeguard'
+#     vars:
+#       kubeguard_remove_node: yes
+
+# - name: remove node from api server
+#   hosts: _kubernetes_primary_master_
+#   tasks:
+#   - name: remove superfluous nodes
+#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+#     command: "kubectl delete node {{ item }}"
+
+#   - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
+#     when: kube_bootstrap_token != ""
+#     pause:
+#       seconds: 42
+
+#   - name: remove bootstrap-token
+#     when: kube_bootstrap_token != ""
+#     command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
 
 ### TODO: add node labels (i.e. for ingress daemonset)
diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index a28cba9c..7e01d0ab 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -8,26 +8,25 @@ kubernetes:
   container_runtime: containerd
   network_plugin: kubeguard
 
-  dedicated_master: True
-  api_advertise_ip: 144.76.160.141
+  dedicated_master: False
+  api_advertise_ip: 89.106.215.23
   api_extra_sans:
-    - k8s-test.chaos-at-home.org
+    - k8s-test.spreadspace.org
 
   pod_ip_range: 172.18.0.0/16
   pod_ip_range_size: 24
   service_ip_range: 172.18.192.0/18
 
-
 kubeguard:
   kube_router_version: 0.4.0-rc1
 
-  ## host_index must be in the range between 1 and 190 -> 189 hosts possible
+  ## node_index must be in the range between 1 and 190 -> 189 hosts possible
   ##
   ## hardcoded hostnames are not nice but if we do this via host_vars
   ## the info is spread over multiple files and this makes it more difficult
   ## to find mistakes, so it is nicer to keep it in one place...
-  host_index:
+  node_index:
     s2-k8s-test0: 1
     s2-k8s-test1: 2
     s2-k8s-test2: 3
@@ -40,3 +39,7 @@ kubeguard:
     node_interface:
       s2-k8s-test0: direct0
       s2-k8s-test1: direct0
+
+
+kubernetes_kubelet_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
+kubernetes_cri_socket: "unix:///run/containerd/containerd.sock"
diff --git a/roles/kubernetes/base/tasks/cri_containerd.yml b/roles/kubernetes/base/tasks/cri_containerd.yml
new file mode 100644
index 00000000..aa34e6fe
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_containerd.yml
@@ -0,0 +1,4 @@
+---
+- name: install containerd
+  include_role:
+    name: containerd
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
new file mode 100644
index 00000000..67196f51
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -0,0 +1,8 @@
+---
+- name: disable bridge and iptables in docker daemon config
+  set_fact:
+    docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+
+- name: install docker
+  include_role:
+    name: docker
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 9c91e347..c3ab1c02 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -1,4 +1,7 @@
 ---
+- name: install container runtime
+  include_tasks: "cri_{{ kubernetes_container_runtime }}.yml"
+
 - name: prepare /var/lib/kubelet as LVM
   when: kubelet_lvm is defined
   import_tasks: lvm.yml
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 2d9b9eed..76953498 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -16,10 +16,11 @@
     selection: hold
 
 - name: set kubelet node-ip
+  when: kubernetes_kubelet_node_ip is defined
   lineinfile:
     name: "/etc/default/kubelet"
     regexp: '^KUBELET_EXTRA_ARGS='
-    line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) | ipaddr(1) | ipaddr("address") }}'
+    line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_kubelet_node_ip }}'
 
 - name: add kubectl/kubeadm completion for shells
   loop:
diff --git a/roles/kubernetes/net/kubeguard/tasks/add.yml b/roles/kubernetes/net/kubeguard/tasks/add.yml
index b604302b..2f9391fc 100644
--- a/roles/kubernetes/net/kubeguard/tasks/add.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/add.yml
@@ -1,4 +1,8 @@
 ---
+- name: install wireguard
+  include_role:
+    name: wireguard/base
+
 - name: create network config directory
   file:
     name: /var/lib/kubeguard/
diff --git a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2 b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
index 9c2d8a63..98b38cf4 100644
--- a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
+++ b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
@@ -8,14 +8,14 @@
 INET_IF="{{ ansible_default_ipv4.interface }}"
 
 POD_NET_CIDR="{{ kubernetes.pod_ip_range }}"
 
-{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[inventory_hostname]) -%}
+{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%}
 BR_IF="kube-br0"
 BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}"
 BR_IP_CIDR="{{ br_net | ipaddr(1) }}"
 BR_NET_CIDR="{{ br_net }}"
 
 TUN_IF="kube-wg0"
-TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.host_index[inventory_hostname]) }}"
+TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}"
 
 
 case "$1" in
diff --git a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2 b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
index 62900c6a..65b1357a 100644
--- a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
+++ b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
@@ -7,6 +7,6 @@
   "hairpinMode": true,
   "ipam": {
     "type": "host-local",
-    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[inventory_hostname]) }}"
+    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
   }
 }
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 1bbb3b72..48feb8ba 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,14 +4,14 @@ After=network.target
 Requires=kubeguard-interfaces.service
 After=kubeguard-interfaces.service
 
-{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[peer]) -%}
+{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
 {% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
 {% if direct_zone %}
-{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.host_index[inventory_hostname]) %}
+{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
 {% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
-{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.host_index[peer]) %}
+{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %}
 {% else %}
-{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.host_index[peer]) -%}
+{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%}
 {% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
 {% set wg_host = hostvars[peer].external_ip | default(hostvars[peer].ansible_default_ipv4.address) -%}
 {% set wg_port = hostvars[peer].kubeguard_wireguard_port -%}
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
new file mode 100644
index 00000000..50f4ccac
--- /dev/null
+++ b/spreadspace/k8s-test.yml
@@ -0,0 +1,14 @@
+---
+- name: cluster layout
+  hosts: localhost
+  gather_facts: no
+  run_once: yes
+  tasks:
+  - name: configure cluster layout
+    set_fact:
+      kubernetes_cluster_layout:
+        nodes: "{{ groups['k8s-test'] }}"
+        masters:
+        - s2-k8s-test0
+
+- import_playbook: ../common/kubernetes.yml
diff --git a/spreadspace/s2-k8s-test.yml b/spreadspace/s2-k8s-test.yml
deleted file mode 100644
index aa80d40b..00000000
--- a/spreadspace/s2-k8s-test.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-## TODO: implement me!
--
cgit v1.2.3
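Note on the address math used throughout this commit: kubeguard carves the pod IP range into one subnet per node, selected by node_index, and reserves subnet 0 for the kube-wg0 transfer addresses, which is why node_index starts at 1. A minimal sketch of what the netaddr-based ipsubnet/ipaddr filters evaluate to, assuming the k8s-test values above (pod_ip_range 172.18.0.0/16, pod_ip_range_size 24, node_index 2); the debug task itself is illustrative and not part of the repository:

- name: illustrate kubeguard address math for node_index 2
  debug:
    msg:
      node_subnet: "{{ '172.18.0.0/16' | ipsubnet(24, 2) }}"             # -> 172.18.2.0/24
      bridge_ip: "{{ '172.18.0.0/16' | ipsubnet(24, 2) | ipaddr(1) }}"   # -> 172.18.2.1/24 (kube-br0, also the kubelet node-ip)
      tunnel_ip: "{{ '172.18.0.0/16' | ipsubnet(24, 0) | ipaddr(2) }}"   # -> 172.18.0.2/24 (kube-wg0 transfer address)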
-- cgit v1.2.3 From 25a978d8ce30bdbb62a0a82443501a4f0d2d6cc2 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 11 Jan 2020 03:03:17 +0100 Subject: kuberntes: base installation works now --- common/kubernetes.yml | 12 ++++++------ inventory/group_vars/k8s-test/main.yml | 6 ++---- roles/kubernetes/kubeadm/base/tasks/main.yml | 3 ++- .../kubeadm/master/templates/kubeadm-cluster.config.j2 | 2 +- 4 files changed, 11 insertions(+), 12 deletions(-) (limited to 'inventory') diff --git a/common/kubernetes.yml b/common/kubernetes.yml index 45d7cc5d..96b39e5a 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -48,7 +48,7 @@ run_once: yes tasks: - name: sanity checks for kubeguard - when: kubernetes.network_plugin == 'kubeguard' + when: kubernetes_network_plugin == 'kubeguard' block: - name: check whether every node has a node_index assigned assert: @@ -66,7 +66,7 @@ that: (kubeguard.node_index.values() | min) > 0 - name: make sure the kubernetes_cri_socket variable is configured correctly - when: kubernetes.container_runtime == 'containerd' + when: kubernetes_container_runtime == 'containerd' assert: msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!" that: @@ -78,9 +78,9 @@ hosts: _kubernetes_nodes_ roles: - role: kubernetes/net/kubeguard - when: kubernetes.network_plugin == 'kubeguard' - # - role: kubernetes/base - # - role: kubernetes/kubeadm/base + when: kubernetes_network_plugin == 'kubeguard' + - role: kubernetes/base + - role: kubernetes/kubeadm/base # - name: configure kubernetes primary master # hosts: _kubernetes_primary_master_ @@ -126,7 +126,7 @@ # roles: # - role: kubernetes/kubeadm/reset # - role: kubernetes/net/kubeguard -# when: kubernetes.network_plugin == 'kubeguard' +# when: kubernetes_network_plugin == 'kubeguard' # vars: # kubeguard_action: remove diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index 7e01d0ab..979cc1a3 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -1,12 +1,10 @@ --- kubernetes_version: 1.16.4 +kubernetes_container_runtime: containerd +kubernetes_network_plugin: kubeguard kubernetes: cluster_name: k8s-test - version: "{{ kubernetes_version }}" - - container_runtime: containerd - network_plugin: kubeguard dedicated_master: False api_advertise_ip: 89.106.215.23 diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml index 76953498..414fb67a 100644 --- a/roles/kubernetes/kubeadm/base/tasks/main.yml +++ b/roles/kubernetes/kubeadm/base/tasks/main.yml @@ -21,6 +21,7 @@ name: "/etc/default/kubelet" regexp: '^KUBELET_EXTRA_ARGS=' line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_kubelet_node_ip }}' + create: yes - name: add kubectl/kubeadm completion for shells loop: @@ -29,7 +30,7 @@ blockinfile: path: "/root/.{{ item }}rc" create: yes - marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###" + marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl/kubeadm ###" content: | source <(kubectl completion {{ item }}) source <(kubeadm completion {{ item }}) diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 index 07c4dddd..5ec18614 100644 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 @@ -1,7 +1,7 @@ {# 
https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration -kubernetesVersion: v{{ kubernetes.version }} +kubernetesVersion: v{{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} certificatesDir: /etc/kubernetes/pki {% if kubernetes.api_advertise_ip %} -- cgit v1.2.3 From 7dbf0cae4e1a8d77e79b8aafd5bb08780977481f Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 11 Jan 2020 04:38:08 +0100 Subject: kubernetes: kubeadm/master node some more cleanup (WIP) --- common/kubernetes.yml | 8 ++-- inventory/group_vars/k8s-test/main.yml | 5 ++- roles/kubernetes/kubeadm/master/tasks/main.yml | 22 +++++----- .../kubeadm/master/tasks/secondary-masters.yml | 4 +- .../kubeadm/master/templates/encryption-config.j2 | 2 +- .../master/templates/kubeadm-cluster.config.j2 | 47 ---------------------- .../kubeadm/master/templates/kubeadm.config.j2 | 41 +++++++++++++++++++ roles/kubernetes/kubeadm/node/tasks/main.yml | 13 ++---- spreadspace/group_vars/k8s-test.yml | 10 +++++ 9 files changed, 77 insertions(+), 75 deletions(-) delete mode 100644 roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 create mode 100644 roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 create mode 100644 spreadspace/group_vars/k8s-test.yml (limited to 'inventory') diff --git a/common/kubernetes.yml b/common/kubernetes.yml index 4a9cf65a..d1b4592a 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -82,10 +82,10 @@ - role: kubernetes/base - role: kubernetes/kubeadm/base -# - name: configure kubernetes primary master -# hosts: _kubernetes_primary_master_ -# roles: -# - role: kubernetes/kubeadm/master +- name: configure kubernetes primary master + hosts: _kubernetes_primary_master_ + roles: + - role: kubernetes/kubeadm/master # - name: configure kubernetes secondary masters # hosts: _kubernetes_masters_:!_kubernetes_primary_master_ diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index 979cc1a3..91b7b0c2 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -7,7 +7,6 @@ kubernetes: cluster_name: k8s-test dedicated_master: False - api_advertise_ip: 89.106.215.23 api_extra_sans: - k8s-test.spreadspace.org @@ -16,6 +15,10 @@ kubernetes: service_ip_range: 172.18.192.0/18 +# kubernetes_secrets: +# encryption_config_keys: "{{ vault_kubernetes_encryption_config_keys }}" + + kubeguard: kube_router_version: 0.4.0-rc1 diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml index 9ffdbeee..7f96ff6a 100644 --- a/roles/kubernetes/kubeadm/master/tasks/main.yml +++ b/roles/kubernetes/kubeadm/master/tasks/main.yml @@ -1,15 +1,15 @@ --- -- name: create direcotry for encryption config - file: - name: /etc/kubernetes/encryption - state: directory - mode: 0700 - -- name: install encryption config - template: - src: encryption-config.j2 - dest: /etc/kubernetes/encryption/config - mode: 0600 +# - name: create direcotry for encryption config +# file: +# name: /etc/kubernetes/encryption +# state: directory +# mode: 0700 + +# - name: install encryption config +# template: +# src: encryption-config.j2 +# dest: /etc/kubernetes/encryption/config +# mode: 0600 - name: install primary master diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml index f7e25fb4..fc85a37d 100644 --- 
a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml @@ -16,7 +16,7 @@ - name: upload certs when: "groups['_kubernetes_masters_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0" - command: kubeadm init phase upload-certs --experimental-upload-certs + command: kubeadm init phase upload-certs --upload-certs check_mode: no register: kubeadm_upload_certs @@ -26,7 +26,7 @@ kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" - name: join kubernetes secondary master node - command: "kubeadm join 127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} --apiserver-bind-port 6442{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --experimental-control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 index a69ae84b..345c9bf9 100644 --- a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 @@ -6,7 +6,7 @@ resources: providers: - secretbox: keys: -{% for key in kubernetes.encryption_config_keys %} +{% for key in kubernetes_secrets.encryption_config_keys %} - name: key{{ loop.index }} secret: {{ key }} {% endfor %} diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 deleted file mode 100644 index 78e9d7a7..00000000 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 +++ /dev/null @@ -1,47 +0,0 @@ -{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} -{# #} -apiVersion: kubeadm.k8s.io/v1beta1 -kind: InitConfiguration -{# TODO: this is ugly but we want to create our own token so we can #} -{# better control it's lifetime #} -bootstrapTokens: -- ttl: "1s" ---- -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -kubernetesVersion: {{ kubernetes_version }} -clusterName: {{ kubernetes.cluster_name }} -imageRepository: k8s.gcr.io -{% if kubernetes.api_advertise_ip %} -controlPlaneEndpoint: "{{ kubernetes.api_advertise_ip }}:6443" -{% endif %} -networking: - dnsDomain: cluster.local - podSubnet: {{ kubernetes.pod_ip_range }} - serviceSubnet: {{ kubernetes.service_ip_range }} -apiServer: - extraArgs: -{% if kubernetes.api_advertise_ip %} - advertise-address: {{ kubernetes.api_advertise_ip }} -{% endif %} - encryption-provider-config: /etc/kubernetes/encryption/config - extraVolumes: - - name: encryption-config - hostPath: /etc/kubernetes/encryption - mountPath: /etc/kubernetes/encryption - readOnly: true - pathType: Directory -{% if (kubernetes.api_extra_sans | length) == 0 %} - certSANs: [] -{% else %} - certSANs: -{% for san in 
kubernetes.api_extra_sans %} - - {{ san }} -{% endfor %} -{% endif %} -controllerManager: - extraArgs: - node-cidr-mask-size: "{{ kubernetes_network_node_cidr_size }}" -scheduler: {} -dns: - type: CoreDNS diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 new file mode 100644 index 00000000..e03ea6f6 --- /dev/null +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 @@ -0,0 +1,41 @@ +{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} +{# #} +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +{# TODO: this is ugly but we want to create our own token so we can #} +{# better control it's lifetime #} +bootstrapTokens: +- ttl: "1s" +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +kubernetesVersion: {{ kubernetes_version }} +clusterName: {{ kubernetes.cluster_name }} +imageRepository: k8s.gcr.io +controlPlaneEndpoint: "{{ kubernetes_kubelet_node_ip }}:6443" +networking: + dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} + podSubnet: {{ kubernetes.pod_ip_range }} + serviceSubnet: {{ kubernetes.service_ip_range }} +apiServer: + extraArgs: + advertise-address: {{ kubernetes_kubelet_node_ip }} + # encryption-provider-config: /etc/kubernetes/encryption/config + # extraVolumes: + # - name: encryption-config + # hostPath: /etc/kubernetes/encryption + # mountPath: /etc/kubernetes/encryption + # readOnly: true + # pathType: Directory +{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %} + certSANs: [] +{% else %} + certSANs: + {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }} +{% endif %} +controllerManager: + extraArgs: + node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}" +scheduler: {} +dns: + type: CoreDNS diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml index 9f0057f9..2a140099 100644 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ b/roles/kubernetes/kubeadm/node/tasks/main.yml @@ -1,18 +1,13 @@ --- -- name: get master vars - set_fact: - kube_bootstrap_token: "{{ hostvars[kubernetes_master].kube_bootstrap_token }}" - kube_bootstrap_ca_cert_hash: "{{ hostvars[kubernetes_master].kube_bootstrap_ca_cert_hash }}" - kube_master_addr: "{{ kubernetes.api_advertise_ip | default(hostvars[kubernetes_master].ansible_default_ipv4.address) }}" - - name: join kubernetes node - command: "kubeadm join --token {{ kube_bootstrap_token }} {{ kube_master_addr }}:6443 --discovery-token-ca-cert-hash {{ kube_bootstrap_ca_cert_hash }}" + command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join - name: dump output of kubeadm join to log file - when: kubeadm_join.changed - copy: + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. 
+ copy: # noqa 503 content: "{{ kubeadm_join.stdout }}\n" dest: /etc/kubernetes/kubeadm-join.log diff --git a/spreadspace/group_vars/k8s-test.yml b/spreadspace/group_vars/k8s-test.yml new file mode 100644 index 00000000..389020c4 --- /dev/null +++ b/spreadspace/group_vars/k8s-test.yml @@ -0,0 +1,10 @@ +$ANSIBLE_VAULT;1.2;AES256;spreadspace +39376666393934306161383231356136393664373164653834393534623766323637666632313632 +3062623430363230333736643164393064346431346534650a393062613232663264383537396663 +39363838303361353766616264643139373062313437383332656162393536646262363561356264 +3333376139663332340a333036303333356333376630656632303464356261643731356336373337 +37303939363239613130363232646262353238333237633766613035643238356636323563636231 +66336562313963323536623732396534313131373338353136623461663033313534636561356131 +65373264636562336261316231656362333630656334373135633663666465376430303135383562 +33653663653132633834626165383832323235323563323334643830643934346466343762613433 +3463 -- cgit v1.2.3 From 7bdec06600ccf03a94e6c8d14b0b8b00db85ef7b Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Mon, 13 Jan 2020 19:37:54 +0100 Subject: kubernetes: nicer layout for playook includes and variables --- common/kubernetes-prepare.yml | 45 ++++++++++++++++++++++++++++++++++ common/kubernetes.yml | 43 +------------------------------- inventory/group_vars/k8s-test/main.yml | 3 +++ spreadspace/k8s-test.yml | 19 ++++++-------- 4 files changed, 57 insertions(+), 53 deletions(-) create mode 100644 common/kubernetes-prepare.yml (limited to 'inventory') diff --git a/common/kubernetes-prepare.yml b/common/kubernetes-prepare.yml new file mode 100644 index 00000000..c91d1962 --- /dev/null +++ b/common/kubernetes-prepare.yml @@ -0,0 +1,45 @@ +--- +- name: create host groups + hosts: "{{ kubernetes_nodes_group }}" + connection: local + gather_facts: no + run_once: yes + tasks: + - name: sanity check - fail if masters are not included in nodes + assert: + msg: "the cluster node group '{{ kubernetes_nodes_group }}' must include *all* nodes (master and non-master)" + that: kubernetes_masters | difference(ansible_play_hosts_all) | length == 0 + + - name: sanity check - fail if primary master is not in masters + when: kubernetes_primary_master is defined + assert: + msg: "kubernetes_masters must include kubernetes_primary_master" + that: kubernetes_primary_master in kubernetes_masters + + - name: sanity check - fail on multiple masters if no primary master is configured + assert: + msg: "For multiple masters to work you need to define kubernetes_primary_master" + that: (kubernetes_masters | length) == 1 or kubernetes_primary_master is defined + + - name: create group for all kubernetes nodes + loop: "{{ ansible_play_hosts_all }}" + add_host: + name: "{{ item }}" + inventory_dir: "{{ hostvars[kubernetes_masters[0]].inventory_dir }}" + group: _kubernetes_nodes_ + changed_when: False + + - name: create group for kubernetes master nodes + loop: "{{ kubernetes_masters }}" + add_host: + name: "{{ item }}" + inventory_dir: "{{ hostvars[kubernetes_masters[0]].inventory_dir }}" + group: _kubernetes_masters_ + changed_when: False + + - name: create group for kubernetes primary master + add_host: + name: "{{ kubernetes_primary_master | default(kubernetes_masters[0]) }}" + inventory_dir: "{{ hostvars[kubernetes_masters[0]].inventory_dir }}" + group: _kubernetes_primary_master_ + changed_when: False diff --git a/common/kubernetes.yml b/common/kubernetes.yml index d1b4592a..adcaf5e7 100644 --- a/common/kubernetes.yml 
+++ b/common/kubernetes.yml @@ -1,46 +1,5 @@ --- -- name: create host groups - hosts: localhost - gather_facts: no - tasks: - - name: sanity check - fail if masters are not included in nodes - assert: - msg: "kubernetes_cluster_layout.nodes must include all nodes (master and non-master)" - that: kubernetes_cluster_layout.masters | difference(kubernetes_cluster_layout.nodes) | length == 0 - - - name: sanity check - fail if primary master is not in masters - when: kubernetes_cluster_layout.primary_master is defined - assert: - msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master" - that: kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters - - - name: sanity check - fail on multiple masters if no primary master is configured - assert: - msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master" - that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined - - - name: create group for all kubernetes nodes - loop: "{{ kubernetes_cluster_layout.nodes }}" - add_host: - name: "{{ item }}" - inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}" - group: _kubernetes_nodes_ - changed_when: False - - - name: create group for kubernetes master nodes - loop: "{{ kubernetes_cluster_layout.masters }}" - add_host: - name: "{{ item }}" - inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}" - group: _kubernetes_masters_ - changed_when: False - - - name: create group for kubernetes primary master - add_host: - name: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}" - inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}" - group: _kubernetes_primary_master_ - changed_when: False +- import_playbook: kubernetes-prepare.yml - name: prepare variables and do some sanity checks hosts: _kubernetes_nodes_ diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index 91b7b0c2..a4aeff04 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -3,6 +3,9 @@ kubernetes_version: 1.16.4 kubernetes_container_runtime: containerd kubernetes_network_plugin: kubeguard +kubernetes_masters: + - s2-k8s-test0 + kubernetes: cluster_name: k8s-test diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml index 50f4ccac..83f19ced 100644 --- a/spreadspace/k8s-test.yml +++ b/spreadspace/k8s-test.yml @@ -1,14 +1,11 @@ --- -- name: cluster layout - hosts: localhost - gather_facts: no - run_once: yes - tasks: - - name: configure cluster layout - set_fact: - kubernetes_cluster_layout: - nodes: "{{ groups['k8s-test'] }}" - masters: - - s2-k8s-test0 +- name: Basic Node Setup + hosts: k8s-test + roles: + - role: base + - role: sshd + - role: zsh - import_playbook: ../common/kubernetes.yml + vars: + kubernetes_nodes_group: k8s-test -- cgit v1.2.3 From 5313897329a4b11187ee2e6b55beba05602e55c3 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Mon, 13 Jan 2020 21:28:49 +0100 Subject: fixup! 
kubernetes: nicer layout for playook includes and variables --- common/kubernetes-cluster-layout.yml | 46 ++++++++++++++++++++++++++++++++++ common/kubernetes-prepare.yml | 45 --------------------------------- common/kubernetes.yml | 2 +- inventory/group_vars/k8s-test/main.yml | 4 --- spreadspace/k8s-test.yml | 5 +++- 5 files changed, 51 insertions(+), 51 deletions(-) create mode 100644 common/kubernetes-cluster-layout.yml delete mode 100644 common/kubernetes-prepare.yml (limited to 'inventory') diff --git a/common/kubernetes-cluster-layout.yml b/common/kubernetes-cluster-layout.yml new file mode 100644 index 00000000..64856fc5 --- /dev/null +++ b/common/kubernetes-cluster-layout.yml @@ -0,0 +1,46 @@ +--- +- name: create host groups for kubernetes cluster + hosts: "{{ kubernetes_cluster_layout.nodes_group }}" + connection: local + gather_facts: no + run_once: yes + tasks: + - name: sanity check - fail if masters are not included in nodes + assert: + msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (master and non-master)" + that: kubernetes_cluster_layout.masters | difference(ansible_play_hosts_all) | length == 0 + + - name: sanity check - fail if primary master is not in masters + assert: + msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master" + that: kubernetes_cluster_layout.primary_master is undefined or kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters + + - name: sanity check - fail on multiple masters if no primary master is configured + assert: + msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master" + that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined + + - name: create group for all kubernetes nodes + loop: "{{ ansible_play_hosts_all }}" + add_host: + name: "{{ item }}" + inventory_dir: "{{ hostvars[item].inventory_dir }}" + group: _kubernetes_nodes_ + changed_when: False + + - name: create group for kubernetes master nodes + loop: "{{ kubernetes_cluster_layout.masters }}" + add_host: + name: "{{ item }}" + inventory_dir: "{{ hostvars[item].inventory_dir }}" + group: _kubernetes_masters_ + changed_when: False + + - name: create group for kubernetes primary master + vars: + item: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}" + add_host: + name: "{{ item }}" + inventory_dir: "{{ hostvars[item].inventory_dir }}" + group: _kubernetes_primary_master_ + changed_when: False diff --git a/common/kubernetes-prepare.yml b/common/kubernetes-prepare.yml deleted file mode 100644 index c91d1962..00000000 --- a/common/kubernetes-prepare.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: create host groups - hosts: "{{ kubernetes_nodes_group }}" - connection: local - gather_facts: no - run_once: yes - tasks: - - name: sanity check - fail if masters are not included in nodes - assert: - msg: "the cluster node group '{{ kubernetes_nodes_group }}' must include *all* nodes (master and non-master)" - that: kubernetes_masters | difference(ansible_play_hosts_all) | length == 0 - - - name: sanity check - fail if primary master is not in masters - when: kubernetes_primary_master is defined - assert: - msg: "kubernetes_masters must include kubernetes_primary_master" - that: kubernetes_primary_master in kubernetes_masters - - - name: sanity check - fail on multiple masters if no primary master is configured - assert: - msg: "For multiple 
masters to work you need to define kubernetes_primary_master" - that: (kubernetes_masters | length) == 1 or kubernetes_primary_master is defined - - - name: create group for all kubernetes nodes - loop: "{{ ansible_play_hosts_all }}" - add_host: - name: "{{ item }}" - inventory_dir: "{{ hostvars[kubernetes_masters[0]].inventory_dir }}" - group: _kubernetes_nodes_ - changed_when: False - - - name: create group for kubernetes master nodes - loop: "{{ kubernetes_masters }}" - add_host: - name: "{{ item }}" - inventory_dir: "{{ hostvars[kubernetes_masters[0]].inventory_dir }}" - group: _kubernetes_masters_ - changed_when: False - - - name: create group for kubernetes primary master - add_host: - name: "{{ kubernetes_primary_master | default(kubernetes_masters[0]) }}" - inventory_dir: "{{ hostvars[kubernetes_masters[0]].inventory_dir }}" - group: _kubernetes_primary_master_ - changed_when: False diff --git a/common/kubernetes.yml b/common/kubernetes.yml index adcaf5e7..21af11a6 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -1,5 +1,5 @@ --- -- import_playbook: kubernetes-prepare.yml +- import_playbook: kubernetes-cluster-layout.yml - name: prepare variables and do some sanity checks hosts: _kubernetes_nodes_ diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index a4aeff04..e1b6570f 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -3,9 +3,6 @@ kubernetes_version: 1.16.4 kubernetes_container_runtime: containerd kubernetes_network_plugin: kubeguard -kubernetes_masters: - - s2-k8s-test0 - kubernetes: cluster_name: k8s-test @@ -17,7 +14,6 @@ kubernetes: pod_ip_range_size: 24 service_ip_range: 172.18.192.0/18 - # kubernetes_secrets: # encryption_config_keys: "{{ vault_kubernetes_encryption_config_keys }}" diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml index 83f19ced..27599556 100644 --- a/spreadspace/k8s-test.yml +++ b/spreadspace/k8s-test.yml @@ -8,4 +8,7 @@ - import_playbook: ../common/kubernetes.yml vars: - kubernetes_nodes_group: k8s-test + kubernetes_cluster_layout: + nodes_group: k8s-test + masters: + - s2-k8s-test0 -- cgit v1.2.3 From cd946c702fea849b06e0fd6a19ef5597235caf55 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 17 Jan 2020 17:46:08 +0100 Subject: single master kubernetes cluster works now --- common/kubernetes.yml | 18 +++++++++--------- inventory/group_vars/k8s-test-2019vm/main.yml | 4 ++-- inventory/group_vars/k8s-test-atlas/main.yml | 4 ++-- inventory/group_vars/k8s-test/main.yml | 8 +++++++- .../kubernetes/kubeadm/master/tasks/primary-master.yml | 17 +---------------- .../kubeadm/master/templates/kubeadm.config.j2 | 4 ++-- roles/kubernetes/kubeadm/node/tasks/main.yml | 2 +- spreadspace/k8s-test.yml | 12 ++++++------ 8 files changed, 30 insertions(+), 39 deletions(-) (limited to 'inventory') diff --git a/common/kubernetes.yml b/common/kubernetes.yml index c4f3f81e..aaf23219 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -45,14 +45,14 @@ roles: - role: kubernetes/kubeadm/master -# - name: configure kubernetes secondary masters -# hosts: _kubernetes_masters_:!_kubernetes_primary_master_ -# roles: -# - role: kubernetes/kubeadm/master - -# - name: configure kubernetes non-master nodes -# hosts: _kubernetes_nodes_:!_kubernetes_masters_ -# roles: -# - role: kubernetes/kubeadm/node +- name: configure kubernetes secondary masters + hosts: _kubernetes_masters_:!_kubernetes_primary_master_ + roles: + - role: 
kubernetes/kubeadm/master + +- name: configure kubernetes non-master nodes + hosts: _kubernetes_nodes_:!_kubernetes_masters_ + roles: + - role: kubernetes/kubeadm/node ### TODO: add node labels (ie. for ingress daeomnset) diff --git a/inventory/group_vars/k8s-test-2019vm/main.yml b/inventory/group_vars/k8s-test-2019vm/main.yml index 2cbe5be1..4c08a1bb 100644 --- a/inventory/group_vars/k8s-test-2019vm/main.yml +++ b/inventory/group_vars/k8s-test-2019vm/main.yml @@ -4,7 +4,7 @@ vm_host: sk-2019vm install: host: "{{ vm_host }}" mem: 1024 - numcpu: 1 + numcpu: 2 disks: primary: /dev/sda scsi: @@ -12,7 +12,7 @@ install: type: zfs pool: storage name: "{{ inventory_hostname }}" - size: 5g + size: 10g interfaces: - bridge: br-public name: primary0 diff --git a/inventory/group_vars/k8s-test-atlas/main.yml b/inventory/group_vars/k8s-test-atlas/main.yml index 4212cf5e..9838513d 100644 --- a/inventory/group_vars/k8s-test-atlas/main.yml +++ b/inventory/group_vars/k8s-test-atlas/main.yml @@ -6,7 +6,7 @@ vm_host: ch-atlas install: host: "{{ vm_host }}" mem: 1024 - numcpu: 1 + numcpu: 2 disks: primary: /dev/sda scsi: @@ -14,7 +14,7 @@ install: type: lvm vg: "{{ hostvars[vm_host].host_name }}" lv: "{{ inventory_hostname }}" - size: 5g + size: 10g interfaces: - bridge: br-public name: primary0 diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index e1b6570f..0d4d0857 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -1,5 +1,11 @@ --- -kubernetes_version: 1.16.4 +containerd_lvm: + vg: "{{ host_name }}" + lv: containerd + size: 4G + fs: ext4 + +kubernetes_version: 1.17.1 kubernetes_container_runtime: containerd kubernetes_network_plugin: kubeguard diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml index 58658794..5efc91b5 100644 --- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml +++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml @@ -53,7 +53,6 @@ fail: msg: "upgrading cluster config is currently not supported!" - ### cluster is already initialized - name: prepare cluster for new nodes @@ -76,22 +75,8 @@ check_mode: no register: kubeadm_token_create -## - -## this fixes the kubelet kubeconfig to make use of certificate rotation. This is a bug in -## kubeadm init which was fixed with 1.17 release. TODO: remove this once all cluster have been -## upgraded to 1.17 or newer. 
-- name: fix kubeconfig of kubelet - lineinfile: - path: /etc/kubernetes/kubelet.conf - backrefs: yes - regexp: '^(\s*)client-{{ item }}(-data)?:' - line: '\1client-{{ item }}: /var/lib/kubelet/pki/kubelet-client-current.pem' - with_items: - - certificate - - key - notify: restart kubelet +## calculate certificate digest - name: install openssl apt: diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 index e03ea6f6..3c10e59b 100644 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 @@ -1,13 +1,13 @@ {# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} {# #} -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: InitConfiguration {# TODO: this is ugly but we want to create our own token so we can #} {# better control it's lifetime #} bootstrapTokens: - ttl: "1s" --- -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: {{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml index 2a140099..dba2ce30 100644 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ b/roles/kubernetes/kubeadm/node/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: join kubernetes node - command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" + command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml index 27599556..ed56cb78 100644 --- a/spreadspace/k8s-test.yml +++ b/spreadspace/k8s-test.yml @@ -1,10 +1,10 @@ --- -- name: Basic Node Setup - hosts: k8s-test - roles: - - role: base - - role: sshd - - role: zsh +# - name: Basic Node Setup +# hosts: k8s-test +# roles: +# - role: base +# - role: sshd +# - role: zsh - import_playbook: ../common/kubernetes.yml vars: -- cgit v1.2.3 From 8010f57a73885f7abb5c98c1f77c49baa59a7d16 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 17 Jan 2020 22:24:09 +0100 Subject: kubernetes: multi master cluster works now --- inventory/group_vars/k8s-test/main.yml | 3 +-- .../kubeadm/master/tasks/primary-master.yml | 30 ++++++++++++---------- .../kubeadm/master/tasks/secondary-masters.yml | 27 ++++++++++--------- .../kubeadm/master/templates/kubeadm.config.j2 | 11 +++++--- roles/kubernetes/kubeadm/node/tasks/main.yml | 25 ++++++++++-------- .../kubeguard/templates/kubeguard-peer.service.j2 | 3 ++- spreadspace/k8s-test.yml | 3 +++ 7 files changed, 60 insertions(+), 42 deletions(-) (limited to 'inventory') diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index 0d4d0857..b5863ad1 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -14,6 +14,7 @@ kubernetes: dedicated_master: False 
 
   api_extra_sans:
+    - 89.106.215.23
     - k8s-test.spreadspace.org
 
   pod_ip_range: 172.18.0.0/16
@@ -25,8 +26,6 @@ kubernetes:
 
 
 kubeguard:
-  kube_router_version: 0.4.0-rc1
-
   ## node_index must be in the range between 1 and 190 -> 189 hosts possible
   ##
   ## hardcoded hostnames are not nice but if we do this via host_vars
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index e814e847..115c8616 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -24,35 +24,39 @@
 #    check_mode: no
 #    register: kubeadm_token_generate
 
-  - name: initialize kubernetes master
-    command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
-#   command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
-    args:
-      creates: /etc/kubernetes/pki/ca.crt
-    register: kubeadm_init
-
-  - name: dump output of kubeadm init to log file
-    when: kubeadm_init.changed
-    copy:
-      content: "{{ kubeadm_init.stdout }}\n"
-      dest: /etc/kubernetes/kubeadm-init.log
+  - name: initialize kubernetes master and store log
+    block:
+    - name: initialize kubernetes master
+      command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+    # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+      args:
+        creates: /etc/kubernetes/pki/ca.crt
+      register: kubeadm_init
+
+    always:
+    - name: dump output of kubeadm init to log file
+      when: kubeadm_init.changed
+      copy:
+        content: "{{ kubeadm_init.stdout }}\n"
+        dest: /etc/kubernetes/kubeadm-init.log
 
   - name: create bootstrap token for existing cluster
     command: kubeadm token create --ttl 42m
     check_mode: no
    register: kubeadm_token_generate
+
 
 ### cluster is already initialized but config has changed
 
 - name: upgrade cluster config
   when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed
   block:
-
   - name: fail
     fail:
       msg: "upgrading cluster config is currently not supported!"
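The kubeadm_config variable tested in the when: above is presumably registered
by a template task earlier in this file that renders /etc/kubernetes/kubeadm.config
from the kubeadm.config.j2 template shown further down; a minimal sketch of what
such a task looks like (task name assumed, not part of this diff):

    - name: generate kubeadm configuration
      template:
        src: kubeadm.config.j2
        dest: /etc/kubernetes/kubeadm.config
      register: kubeadm_config

The block/always construct introduced above has a related motivation: with
always:, the play still reaches the log-dump task when kubeadm init fails,
instead of aborting before anything is written.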
+
 
 ### cluster is already initialized
 
 - name: prepare cluster for new nodes
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index 7025ace0..ffe1b4b2 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -25,18 +25,21 @@
   set_fact:
     kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
 
-- name: join kubernetes secondary master node
-  command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
-  args:
-    creates: /etc/kubernetes/kubelet.conf
-  register: kubeadm_join
-
-- name: dump output of kubeadm join to log file
-  when: kubeadm_join is changed
-  # This is not a handler by design to make sure this action runs at this point of the play.
-  copy: # noqa 503
-    content: "{{ kubeadm_join.stdout }}\n"
-    dest: /etc/kubernetes/kubeadm-join.log
+- name: join kubernetes secondary master node and store log
+  block:
+  - name: join kubernetes secondary master node
+    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_kubelet_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_kubelet_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+    args:
+      creates: /etc/kubernetes/kubelet.conf
+    register: kubeadm_join
+
+  always:
+  - name: dump output of kubeadm join to log file
+    when: kubeadm_join is changed
+    # This is not a handler by design to make sure this action runs at this point of the play.
+    copy: # noqa 503
+      content: "{{ kubeadm_join.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-join.log
 
 # TODO: actually check if node has registered
 - name: give the new master(s) a moment to register
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
index 3c10e59b..869c809f 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -1,4 +1,4 @@
-{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #}
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
 {# #}
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: InitConfiguration
@@ -6,20 +6,25 @@ kind: InitConfiguration
 {# better control its lifetime #}
 bootstrapTokens:
 - ttl: "1s"
+{% if kubernetes_kubelet_node_ip is defined %}
+localAPIEndpoint:
+  advertiseAddress: {{ kubernetes_kubelet_node_ip }}
+{% endif %}
 ---
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: ClusterConfiguration
 kubernetesVersion: {{ kubernetes_version }}
 clusterName: {{ kubernetes.cluster_name }}
 imageRepository: k8s.gcr.io
+{% if kubernetes_kubelet_node_ip is defined %}
 controlPlaneEndpoint: "{{ kubernetes_kubelet_node_ip }}:6443"
+{% endif %}
 networking:
   dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }}
   podSubnet: {{ kubernetes.pod_ip_range }}
   serviceSubnet: {{ kubernetes.service_ip_range }}
 apiServer:
-  extraArgs:
-    advertise-address: {{ kubernetes_kubelet_node_ip }}
+  #extraArgs:
   #  encryption-provider-config: /etc/kubernetes/encryption/config
   # extraVolumes:
   # - name: encryption-config
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index f7efdd81..61d47111 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -1,13 +1,16 @@
 ---
-- name: join kubernetes node
-  command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
-  args:
-    creates: /etc/kubernetes/kubelet.conf
-  register: kubeadm_join
+- name: join kubernetes node and store log
+  block:
+  - name: join kubernetes node
+    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+    args:
+      creates: /etc/kubernetes/kubelet.conf
+    register: kubeadm_join
+
+  always:
+  - name: dump output of kubeadm join to log file
+    when: kubeadm_join is changed
+    # This is not a handler by design to make sure this action runs at this point of the play.
+    copy: # noqa 503
+      content: "{{ kubeadm_join.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-join.log
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 6f36b571..9ca444e8 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,6 +4,7 @@ After=network.target
 Requires=kubeguard-interfaces.service
 After=kubeguard-interfaces.service
 
+{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%}
 {% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
 {% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
 {% if direct_zone %}
@@ -22,7 +23,7 @@ Type=oneshot
 {% if direct_zone %}
 ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }}
 ExecStart=/sbin/ip link set up dev {{ direct_interface }}
-ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }}
+ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }}
 ExecStop=/sbin/ip route del {{ pod_net_peer }}
 ExecStop=/sbin/ip link set down dev {{ direct_interface }}
 ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }}
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
index 97daa5b0..f21b3fae 100644
--- a/spreadspace/k8s-test.yml
+++ b/spreadspace/k8s-test.yml
@@ -12,6 +12,9 @@
       nodes_group: k8s-test
       masters:
         - s2-k8s-test0
+        - s2-k8s-test1
+        - s2-k8s-test2
+      primary_master: s2-k8s-test0
 
 - import_playbook: ../common/kubernetes.yml
 - import_playbook: ../common/kubernetes-cleanup.yml
-- 
cgit v1.2.3
From b64058268b377cc78057b8ba8d3190e520d33053 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Fri, 17 Jan 2020 22:42:27 +0100
Subject: kubernetes: kubernetes_overlay_node_ip

---
 common/kubernetes.yml                                       | 5 +++++
 inventory/group_vars/k8s-test/main.yml                      | 3 +--
 roles/kubernetes/kubeadm/base/tasks/main.yml                | 4 ++--
 roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml | 2 +-
 roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 | 8 ++++----
 roles/kubernetes/kubeadm/node/tasks/main.yml                | 2 +-
 6 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index 4fc8cef2..d5b58767 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -22,6 +22,11 @@
         msg: "At least one node_index is < 1 (indices start at 1)"
         that: (kubeguard.node_index.values() | min) > 0
 
+    - name: check whether overlay node ip is configured
+      assert:
+        msg: "For kubeguard to work you need to configure kubernetes_overlay_node_ip"
+        that: kubernetes_overlay_node_ip is defined
+
     - name: make sure the kubernetes_cri_socket variable is configured correctly
       when: kubernetes_container_runtime == 'containerd'
       assert:
diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index b5863ad1..60d381ec 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -8,6 +8,7 @@
 kubernetes_version: 1.17.1
 kubernetes_container_runtime: containerd
 kubernetes_network_plugin: kubeguard
+kubernetes_cri_socket: "unix:///run/containerd/containerd.sock"
 
 kubernetes:
   cluster_name: k8s-test
@@ -45,6 +46,4 @@ kubeguard:
     s2-k8s-test0: direct0
     s2-k8s-test1: direct0
 
-
-kubernetes_kubelet_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
-kubernetes_cri_socket: "unix:///run/containerd/containerd.sock"
+kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 37944915..69a09811 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -16,11 +16,11 @@
     selection: hold
 
 - name: set kubelet node-ip
-  when: kubernetes_kubelet_node_ip is defined
+  when: kubernetes_overlay_node_ip is defined
   lineinfile:
     name: "/etc/default/kubelet"
     regexp: '^KUBELET_EXTRA_ARGS='
-    line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_kubelet_node_ip }}'
+    line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_overlay_node_ip }}'
     create: yes
 
 - name: add kubeadm completion for shells
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index ffe1b4b2..3c800a87 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -28,7 +28,7 @@
 - name: join kubernetes secondary master node and store log
   block:
   - name: join kubernetes secondary master node
-    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_kubelet_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_kubelet_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_overlay_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
     args:
       creates: /etc/kubernetes/kubelet.conf
     register: kubeadm_join
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
index 869c809f..06d59ced 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -6,9 +6,9 @@ kind: InitConfiguration
 {# better control its lifetime #}
 bootstrapTokens:
 - ttl: "1s"
-{% if kubernetes_kubelet_node_ip is defined %}
+{% if kubernetes_overlay_node_ip is defined %}
 localAPIEndpoint:
-  advertiseAddress: {{ kubernetes_kubelet_node_ip }}
+  advertiseAddress: {{ kubernetes_overlay_node_ip }}
 {% endif %}
 ---
 apiVersion: kubeadm.k8s.io/v1beta2
@@ -16,8 +16,8 @@ kind: ClusterConfiguration
 kubernetesVersion: {{ kubernetes_version }}
 clusterName: {{ kubernetes.cluster_name }}
 imageRepository: k8s.gcr.io
-{% if kubernetes_kubelet_node_ip is defined %}
-controlPlaneEndpoint: "{{ kubernetes_kubelet_node_ip }}:6443"
+{% if kubernetes_overlay_node_ip is defined %}
+controlPlaneEndpoint: "{{ kubernetes_overlay_node_ip }}:6443"
 {% endif %}
 networking:
   dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }}
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index 61d47111..e4fff98b 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: join kubernetes node and store log
   block:
   - name: join kubernetes node
-    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_overlay_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
     args:
       creates: /etc/kubernetes/kubelet.conf
     register: kubeadm_join
-- 
cgit v1.2.3
From e121406963976aed4dbeadee4adc3e8195a90e36 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Fri, 31 Jan 2020 23:53:33 +0100
Subject: kubernetes standalone with docker

---
 dan/ele-thetys.yml                                    |  3 ++-
 inventory/host_vars/ele-thetys.yml                    | 11 +++++++++++
 roles/kubernetes/base/tasks/main.yml                  |  4 ++--
 .../standalone/templates/kubelet.service.override.j2  |  1 +
 4 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/dan/ele-thetys.yml b/dan/ele-thetys.yml
index eca748e9..4024989d 100644
--- a/dan/ele-thetys.yml
+++ b/dan/ele-thetys.yml
@@ -8,4 +8,5 @@
     - role: admin-user
     - role: blackmagic-desktopvideo
     - role: apt-repo/spreadspace
-    - role: docker
+    - role: kubernetes/base
+    - role: kubernetes/standalone
diff --git a/inventory/host_vars/ele-thetys.yml b/inventory/host_vars/ele-thetys.yml
index 5740b206..51dcf1a0 100644
--- a/inventory/host_vars/ele-thetys.yml
+++ b/inventory/host_vars/ele-thetys.yml
@@ -28,3 +28,14 @@ docker_lvm:
   lv: docker
   size: 10G
   fs: ext4
+
+kubelet_lvm:
+  vg: "{{ host_name }}"
+  lv: kubelet
+  size: 5G
+  fs: ext4
+
+kubernetes_version: 1.17.2
+kubernetes_container_runtime: docker
+kubernetes_standalone_max_pods: 42
+kubernetes_standalone_cni_variant: with-portmap
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index c3ab1c02..f1802b0c 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -69,11 +69,11 @@
 - name: add dummy group with gid 998
   group:
     name: app
-    gid: 998
+    gid: 990
 
 - name: add dummy user with uid 998
   user:
     name: app
-    uid: 998
+    uid: 990
     group: app
     password: "!"
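A node set up via kubernetes/base plus kubernetes/standalone presumably runs the
kubelet without joining any cluster, so workloads have to come from static pod
manifests dropped into the kubelet's manifest directory (commonly
/etc/kubernetes/manifests). A minimal sketch of such a manifest; the pod name
and image are examples only:

    apiVersion: v1
    kind: Pod
    metadata:
      name: example-web
    spec:
      containers:
      - name: web
        image: nginx:1.17
        ports:
        - containerPort: 80
          hostPort: 8080

The hostPort mapping is also the likely reason for
kubernetes_standalone_cni_variant: with-portmap above: hostPort entries only
take effect when the portmap plugin is part of the CNI plugin chain.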
diff --git a/roles/kubernetes/standalone/templates/kubelet.service.override.j2 b/roles/kubernetes/standalone/templates/kubelet.service.override.j2
index 3a88ccd2..75061e73 100644
--- a/roles/kubernetes/standalone/templates/kubelet.service.override.j2
+++ b/roles/kubernetes/standalone/templates/kubelet.service.override.j2
@@ -6,4 +6,5 @@ ExecStart=/usr/bin/kubelet \
     --container-runtime=remote \
     --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
 {% endif %}
+    --network-plugin=cni \
     --cloud-provider=
-- 
cgit v1.2.3
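For a host like ele-thetys with kubernetes_container_runtime: docker, the
container-runtime flags guarded by the surrounding {% if %} are skipped, so the
drop-in should render to roughly the following (the flags from the head of the
template are not visible in this hunk and are elided here):

    [Service]
    ExecStart=
    ExecStart=/usr/bin/kubelet \
        ... \
        --network-plugin=cni \
        --cloud-provider=

The empty ExecStart= line is the usual systemd idiom for a drop-in that
replaces, rather than appends to, the packaged unit's command line;
--network-plugin=cni is what makes the dockershim use the CNI configuration
instead of the default docker bridge.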