 common/kubernetes.yml                                              | 212
 inventory/group_vars/k8s-test/main.yml                             |  15
 roles/kubernetes/base/tasks/cri_containerd.yml                     |   4
 roles/kubernetes/base/tasks/cri_docker.yml                         |   8
 roles/kubernetes/base/tasks/main.yml                               |   3
 roles/kubernetes/kubeadm/base/tasks/main.yml                       |   3
 roles/kubernetes/net/kubeguard/tasks/add.yml                       |   4
 roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2            |   4
 roles/kubernetes/net/kubeguard/templates/k8s.json.j2               |   2
 roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 |   8
 spreadspace/k8s-test.yml                                           |  14
 spreadspace/s2-k8s-test.yml                                        |   2
 12 files changed, 181 insertions(+), 98 deletions(-)
diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index 311f3ebd..67f2dd68 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -1,101 +1,149 @@
---
+- name: create host groups
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: sanity check - fail if masters are not included in nodes
+ assert:
+ msg: "kubernetes_cluster_layout.nodes must include all nodes (master and non-master)"
+ that: kubernetes_cluster_layout.masters | difference(kubernetes_cluster_layout.nodes) | length == 0
+
+ - name: sanity check - fail if primary master is not in masters
+ when: kubernetes_cluster_layout.primary_master is defined
+ assert:
+ msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
+ that: kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters
+
+ - name: sanity check - fail on multiple masters if no primary master is configured
+ assert:
+ msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
+ that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
+
+ - name: create group for all kubernetes nodes
+ loop: "{{ kubernetes_cluster_layout.nodes }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+ group: _kubernetes_nodes_
+ changed_when: False
+
+ - name: create group for kubernetes master nodes
+ loop: "{{ kubernetes_cluster_layout.masters }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+ group: _kubernetes_masters_
+ changed_when: False
+
+ - name: create group for kubernetes primary master
+ add_host:
+ name: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
+ inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+ group: _kubernetes_primary_master_
+ changed_when: False
+
- name: prepare variables and do some sanity checks
hosts: _kubernetes_nodes_
gather_facts: no
run_once: yes
tasks:
- - name: check if master group contains only one node
- fail:
- msg: "There must be exactly one master node defined"
- failed_when: (groups['_kubernetes_masters_'] | length) != 1
-
- - name: setup variables
- set_fact:
- kubernetes_nodes: "{{ groups['_kubernetes_nodes_'] }}"
- kubernetes_master: "{{ groups['_kubernetes_masters_'] | first }}"
-
- - name: check whether every node has a net_index assigned
- fail:
- msg: "There are nodes without an assigned net-index: {{ kubernetes_nodes | difference(kubernetes.net_index.keys()) | join(', ') }}"
- failed_when: kubernetes_nodes | difference(kubernetes.net_index.keys()) | length > 0
-
- - name: check whether net indizes are unique
- fail:
- msg: "There are duplicate entries in the net_index table, every net-index is only allowed once"
- failed_when: (kubernetes.net_index.keys() | length) != (kubernetes.net_index.values() | unique | length)
-
- - name: check whether net indizes are all > 0
- fail:
- msg: "At least one net-index is < 1 (indizes start at 1)"
- failed_when: (kubernetes.net_index.values() | min) < 1
-
- - name: disable bridge and iptables in docker daemon config
- set_fact:
- docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+ - name: sanity checks for kubeguard
+ when: kubernetes.network_plugin == 'kubeguard'
+ block:
+ - name: check whether every node has a node_index assigned
+ assert:
+ msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
+ that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0
+
+ - name: check whether node indices are unique
+ assert:
+ msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
+ that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)
+
+ - name: check whether node indices are all > 0
+ assert:
+ msg: "At least one node_index is < 1 (indizes start at 1)"
+ that: (kubeguard.node_index.values() | min) > 0
+
+ - name: make sure the kubernetes_cri_socket variable is configured correctly
+ when: kubernetes.container_runtime == 'containerd'
+ assert:
+ msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
+ that:
+ - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"
+
########
-- name: install kubernetes and overlay network
+- name: kubernetes base installation
hosts: _kubernetes_nodes_
roles:
- - role: docker
- role: kubernetes/net/kubeguard
+ when: kubernetes.network_plugin == 'kubeguard'
- role: kubernetes/base
- role: kubernetes/kubeadm/base
-- name: configure kubernetes master
- hosts: _kubernetes_masters_
- roles:
- - role: kubernetes/kubeadm/master
+# - name: configure kubernetes primary master
+# hosts: _kubernetes_primary_master_
+# roles:
+# - role: kubernetes/kubeadm/master/common
+# - role: kubernetes/kubeadm/master/primary
-- name: configure kubernetes non-master nodes
- hosts: _kubernetes_nodes_:!_kubernetes_masters_
- roles:
- - role: kubernetes/kubeadm/node
-
-########
-- name: check for nodes to be removed
- hosts: _kubernetes_masters_
- tasks:
- - name: fetch list of current nodes
- command: kubectl get nodes -o name
- changed_when: False
- check_mode: no
- register: kubectl_node_list
-
- - name: generate list of nodes to be removed
- loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
- add_host:
- name: "{{ item }}"
- inventory_dir: "{{ inventory_dir }}"
- group: _kubernetes_nodes_remove_
- changed_when: False
+# - name: configure kubernetes secondary masters
+# hosts: _kubernetes_masters_:!_kubernetes_primary_master_
+# roles:
+# - role: kubernetes/kubeadm/master/common
+# - role: kubernetes/kubeadm/master/secondary
- - name: drain superflous nodes
- loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
- command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+# - name: configure kubernetes non-master nodes
+# hosts: _kubernetes_nodes_:!_kubernetes_masters_
+# roles:
+# - role: kubernetes/kubeadm/node
-- name: try to clean superflous nodes
- hosts: _kubernetes_nodes_remove_
- roles:
- - role: kubernetes/kubeadm/reset
- - role: kubernetes/net/kubeguard
- vars:
- kubeguard_remove_node: yes
-
-- name: remove node from api server
- hosts: _kubernetes_masters_
- tasks:
- - name: remove superflous nodes
- loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
- command: "kubectl delete node {{ item }}"
-
- - name: wait a litte before removing bootstrap-token so new nodes have time to generate certificates for themselves
- when: kube_bootstrap_token != ""
- pause:
- seconds: 42
-
- - name: remove bootstrap-token
- when: kube_bootstrap_token != ""
- command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
+########
+# - name: check for nodes to be removed
+# hosts: _kubernetes_primary_master_
+# tasks:
+# - name: fetch list of current nodes
+# command: kubectl get nodes -o name
+# changed_when: False
+# check_mode: no
+# register: kubectl_node_list
+
+# - name: generate list of nodes to be removed
+# loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
+# add_host:
+# name: "{{ item }}"
+# inventory_dir: "{{ inventory_dir }}"
+# group: _kubernetes_nodes_remove_
+# changed_when: False
+
+# - name: drain superfluous nodes
+# loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+# command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+
+# - name: try to clean superfluous nodes
+# hosts: _kubernetes_nodes_remove_
+# roles:
+# - role: kubernetes/kubeadm/reset
+# - role: kubernetes/net/kubeguard
+# when: kubernetes.network_plugin == 'kubeguard'
+# vars:
+# kubeguard_remove_node: yes
+
+# - name: remove node from api server
+# hosts: _kubernetes_primary_master_
+# tasks:
+# - name: remove superfluous nodes
+# loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+# command: "kubectl delete node {{ item }}"
+
+# - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
+# when: kube_bootstrap_token != ""
+# pause:
+# seconds: 42
+
+# - name: remove bootstrap-token
+# when: kube_bootstrap_token != ""
+# command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
### TODO: add node labels (i.e. for ingress daemonset)
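
The three asserts at the top of this playbook define the contract for `kubernetes_cluster_layout`: every master must also be listed in `nodes`, and `primary_master` becomes mandatory as soon as more than one master is defined. A hypothetical multi-master layout that satisfies all three checks (host names borrowed from the k8s-test files below) could look like:

```yaml
kubernetes_cluster_layout:
  nodes: "{{ groups['k8s-test'] }}"   # must include every master as well
  masters:
    - s2-k8s-test0
    - s2-k8s-test1
  primary_master: s2-k8s-test0        # required once masters has more than one entry
```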
diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index a28cba9c..7e01d0ab 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -8,26 +8,25 @@ kubernetes:
container_runtime: containerd
network_plugin: kubeguard
- dedicated_master: True
- api_advertise_ip: 144.76.160.141
+ dedicated_master: False
+ api_advertise_ip: 89.106.215.23
api_extra_sans:
- - k8s-test.chaos-at-home.org
+ - k8s-test.spreadspace.org
pod_ip_range: 172.18.0.0/16
pod_ip_range_size: 24
service_ip_range: 172.18.192.0/18
-
kubeguard:
kube_router_version: 0.4.0-rc1
- ## host_index must be in the range between 1 and 190 -> 189 hosts possible
+ ## node_index must be in the range between 1 and 190 -> 189 hosts possible
##
## hardcoded hostnames are not nice but if we do this via host_vars
## the info is spread over multiple files and this makes it more difficult
## to find mistakes, so it is nicer to keep it in one place...
- host_index:
+ node_index:
s2-k8s-test0: 1
s2-k8s-test1: 2
s2-k8s-test2: 3
@@ -40,3 +39,7 @@ kubeguard:
node_interface:
s2-k8s-test0: direct0
s2-k8s-test1: direct0
+
+
+kubernetes_kubelet_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}"
+kubernetes_cri_socket: "unix:///run/containerd/containerd.sock"
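
The new `kubernetes_kubelet_node_ip` expression derives each node's kubelet address from its per-node pod subnet. As a worked example, for `s2-k8s-test2` (`node_index: 3`) with the ranges above, the filter chain evaluates step by step to:

```yaml
# "172.18.0.0/16" | ipsubnet(24, 3)    ->  "172.18.3.0/24"  # the node's pod subnet
#                 | ipaddr(1)          ->  "172.18.3.1/24"  # first host address in it
#                 | ipaddr('address')  ->  "172.18.3.1"     # prefix length stripped
kubernetes_kubelet_node_ip: 172.18.3.1
```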
diff --git a/roles/kubernetes/base/tasks/cri_containerd.yml b/roles/kubernetes/base/tasks/cri_containerd.yml
new file mode 100644
index 00000000..aa34e6fe
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_containerd.yml
@@ -0,0 +1,4 @@
+---
+- name: install containerd
+ include_role:
+ name: containerd
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
new file mode 100644
index 00000000..67196f51
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -0,0 +1,8 @@
+---
+- name: disable bridge and iptables in docker daemon config
+ set_fact:
+ docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+
+- name: install docker
+ include_role:
+ name: docker
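
Assuming the `docker` role renders `docker_daemon_config` into `/etc/docker/daemon.json` (that role is not part of this diff), the `combine` above would produce a daemon config fragment like the following, leaving pod networking entirely to the CNI plugin:

```json
{
  "bridge": "none",
  "iptables": false
}
```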
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 9c91e347..c3ab1c02 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -1,4 +1,7 @@
---
+- name: install container runtime
+ include_tasks: "cri_{{ kubernetes.container_runtime }}.yml"
+
- name: prepare /var/lib/kubelet as LVM
when: kubelet_lvm is defined
import_tasks: lvm.yml
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 2d9b9eed..76953498 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -16,10 +16,11 @@
selection: hold
- name: set kubelet node-ip
+ when: kubernetes_kubelet_node_ip is defined
lineinfile:
name: "/etc/default/kubelet"
regexp: '^KUBELET_EXTRA_ARGS='
- line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) | ipaddr(1) | ipaddr("address") }}'
+ line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_kubelet_node_ip }}'
- name: add kubectl/kubeadm completion for shells
loop:
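
With the k8s-test inventory above, the rendered line for a node with `node_index: 1` would be, for instance:

```sh
# /etc/default/kubelet (rendered)
KUBELET_EXTRA_ARGS=--node-ip=172.18.1.1
```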
diff --git a/roles/kubernetes/net/kubeguard/tasks/add.yml b/roles/kubernetes/net/kubeguard/tasks/add.yml
index b604302b..2f9391fc 100644
--- a/roles/kubernetes/net/kubeguard/tasks/add.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/add.yml
@@ -1,4 +1,8 @@
---
+- name: install wireguard
+ include_role:
+ name: wireguard/base
+
- name: create network config directory
file:
name: /var/lib/kubeguard/
diff --git a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2 b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
index 9c2d8a63..98b38cf4 100644
--- a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
+++ b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
@@ -8,14 +8,14 @@ INET_IF="{{ ansible_default_ipv4.interface }}"
POD_NET_CIDR="{{ kubernetes.pod_ip_range }}"
-{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[inventory_hostname]) -%}
+{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%}
BR_IF="kube-br0"
BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}"
BR_IP_CIDR="{{ br_net | ipaddr(1) }}"
BR_NET_CIDR="{{ br_net }}"
TUN_IF="kube-wg0"
-TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.host_index[inventory_hostname]) }}"
+TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}"
case "$1" in
diff --git a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2 b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
index 62900c6a..65b1357a 100644
--- a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
+++ b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
@@ -7,6 +7,6 @@
"hairpinMode": true,
"ipam": {
"type": "host-local",
- "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[inventory_hostname]) }}"
+ "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
}
}
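
Rendered for a node with `node_index: 1` and the k8s-test pod ranges, the visible part of this CNI config becomes (fields outside the hunk omitted, exactly as in the diff):

```json
{
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "subnet": "172.18.1.0/24"
  }
}
```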
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 1bbb3b72..48feb8ba 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,14 +4,14 @@ After=network.target
Requires=kubeguard-interfaces.service
After=kubeguard-interfaces.service
-{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.host_index[peer]) -%}
+{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
{% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
{% if direct_zone %}
-{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.host_index[inventory_hostname]) %}
+{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
{% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
-{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.host_index[peer]) %}
+{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %}
{% else %}
-{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.host_index[peer]) -%}
+{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%}
{% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
{% set wg_host = hostvars[peer].external_ip | default(hostvars[peer].ansible_default_ipv4.address) -%}
{% set wg_port = hostvars[peer].kubeguard_wireguard_port -%}
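
This template chooses a direct link whenever both peers share a zone in `kubernetes.direct_net_zones` and falls back to a WireGuard tunnel otherwise. A hypothetical zone definition matching the lookups above (the zone name and `transfer_net` value are assumptions; the `node_interface` mapping mirrors the inventory change earlier in this diff) could look like:

```yaml
kubernetes:
  direct_net_zones:
    zone0:                            # hypothetical zone name
      transfer_net: 192.168.42.0/24   # hypothetical; addressed per-node via node_index
      node_interface:
        s2-k8s-test0: direct0
        s2-k8s-test1: direct0
```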
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
new file mode 100644
index 00000000..50f4ccac
--- /dev/null
+++ b/spreadspace/k8s-test.yml
@@ -0,0 +1,14 @@
+---
+- name: cluster layout
+ hosts: localhost
+ gather_facts: no
+ run_once: yes
+ tasks:
+ - name: configure cluster layout
+ set_fact:
+ kubernetes_cluster_layout:
+ nodes: "{{ groups['k8s-test'] }}"
+ masters:
+ - s2-k8s-test0
+
+- import_playbook: ../common/kubernetes.yml
diff --git a/spreadspace/s2-k8s-test.yml b/spreadspace/s2-k8s-test.yml
deleted file mode 100644
index aa80d40b..00000000
--- a/spreadspace/s2-k8s-test.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-## TODO: implement me!