Diffstat (limited to 'common/kubernetes.yml')
-rw-r--r--  common/kubernetes.yml  212
1 file changed, 130 insertions(+), 82 deletions(-)
diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index 311f3ebd..67f2dd68 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -1,101 +1,149 @@
 ---
+- name: create host groups
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: sanity check - fail if masters are not included in nodes
+    assert:
+      msg: "kubernetes_cluster_layout.nodes must include all nodes (master and non-master)"
+      that: kubernetes_cluster_layout.masters | difference(kubernetes_cluster_layout.nodes) | length == 0
+
+  - name: sanity check - fail if primary master is not in masters
+    when: kubernetes_cluster_layout.primary_master is defined
+    assert:
+      msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
+      that: kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters
+
+  - name: sanity check - fail on multiple masters if no primary master is configured
+    assert:
+      msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
+      that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
+
+  - name: create group for all kubernetes nodes
+    loop: "{{ kubernetes_cluster_layout.nodes }}"
+    add_host:
+      name: "{{ item }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_nodes_
+    changed_when: False
+
+  - name: create group for kubernetes master nodes
+    loop: "{{ kubernetes_cluster_layout.masters }}"
+    add_host:
+      name: "{{ item }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_masters_
+    changed_when: False
+
+  - name: create group for kubernetes primary master
+    add_host:
+      name: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
+      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
+      group: _kubernetes_primary_master_
+    changed_when: False
+
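The play above drives everything from a single `kubernetes_cluster_layout` variable: `masters` must be a subset of `nodes`, and `primary_master` becomes mandatory as soon as more than one master is listed. A minimal sketch of a layout that passes all three asserts (the hostnames are placeholders, not taken from this commit):

kubernetes_cluster_layout:
  nodes:
  - k8s-master0
  - k8s-node0
  - k8s-node1
  masters:
  - k8s-master0
  ## primary_master may be omitted while there is exactly one master
  # primary_master: k8s-master0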
 - name: prepare variables and do some sanity checks
   hosts: _kubernetes_nodes_
   gather_facts: no
   run_once: yes
   tasks:
-  - name: check if master group contains only one node
-    fail:
-      msg: "There must be exactly one master node defined"
-    failed_when: (groups['_kubernetes_masters_'] | length) != 1
-
-  - name: setup variables
-    set_fact:
-      kubernetes_nodes: "{{ groups['_kubernetes_nodes_'] }}"
-      kubernetes_master: "{{ groups['_kubernetes_masters_'] | first }}"
-
-  - name: check whether every node has a net_index assigned
-    fail:
-      msg: "There are nodes without an assigned net-index: {{ kubernetes_nodes | difference(kubernetes.net_index.keys()) | join(', ') }}"
-    failed_when: kubernetes_nodes | difference(kubernetes.net_index.keys()) | length > 0
-
-  - name: check whether net indices are unique
-    fail:
-      msg: "There are duplicate entries in the net_index table, every net-index is only allowed once"
-    failed_when: (kubernetes.net_index.keys() | length) != (kubernetes.net_index.values() | unique | length)
-
-  - name: check whether net indices are all > 0
-    fail:
-      msg: "At least one net-index is < 1 (indices start at 1)"
-    failed_when: (kubernetes.net_index.values() | min) < 1
-
-  - name: disable bridge and iptables in docker daemon config
-    set_fact:
-      docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+  - name: sanity checks for kubeguard
+    when: kubernetes.network_plugin == 'kubeguard'
+    block:
+    - name: check whether every node has a node_index assigned
+      assert:
+        msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
+        that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0
+
+    - name: check whether node indices are unique
+      assert:
+        msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
+        that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)
+
+    - name: check whether node indices are all > 0
+      assert:
+        msg: "At least one node_index is < 1 (indices start at 1)"
+        that: (kubeguard.node_index.values() | min) > 0
+
+  - name: make sure the kubernetes_cri_socket variable is configured correctly
+    when: kubernetes.container_runtime == 'containerd'
+    assert:
+      msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
+      that:
+      - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"
+
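The asserts above pin down the two inputs of the reworked plugin handling: with `kubernetes.network_plugin == 'kubeguard'`, `kubeguard.node_index` must map every member of `_kubernetes_nodes_` to a unique index starting at 1, and with containerd as the runtime, `kubernetes_cri_socket` must point at the containerd socket. A group_vars sketch that satisfies both checks (hostnames again placeholders):

kubernetes:
  network_plugin: kubeguard
  container_runtime: containerd

kubeguard:
  node_index:
    k8s-master0: 1
    k8s-node0: 2
    k8s-node1: 3

kubernetes_cri_socket: "unix:///run/containerd/containerd.sock"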
 ########
-- name: install kubernetes and overlay network
+- name: kubernetes base installation
   hosts: _kubernetes_nodes_
   roles:
-  - role: docker
   - role: kubernetes/net/kubeguard
+    when: kubernetes.network_plugin == 'kubeguard'
   - role: kubernetes/base
   - role: kubernetes/kubeadm/base
-- name: configure kubernetes master
-  hosts: _kubernetes_masters_
-  roles:
-  - role: kubernetes/kubeadm/master
+# - name: configure kubernetes primary master
+#   hosts: _kubernetes_primary_master_
+#   roles:
+#   - role: kubernetes/kubeadm/master/common
+#   - role: kubernetes/kubeadm/master/primary
-- name: configure kubernetes non-master nodes
-  hosts: _kubernetes_nodes_:!_kubernetes_masters_
-  roles:
-  - role: kubernetes/kubeadm/node
-
-########
-- name: check for nodes to be removed
-  hosts: _kubernetes_masters_
-  tasks:
-  - name: fetch list of current nodes
-    command: kubectl get nodes -o name
-    changed_when: False
-    check_mode: no
-    register: kubectl_node_list
-
-  - name: generate list of nodes to be removed
-    loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
-    add_host:
-      name: "{{ item }}"
-      inventory_dir: "{{ inventory_dir }}"
-      group: _kubernetes_nodes_remove_
-    changed_when: False
+# - name: configure kubernetes secondary masters
+#   hosts: _kubernetes_masters_:!_kubernetes_primary_master_
+#   roles:
+#   - role: kubernetes/kubeadm/master/common
+#   - role: kubernetes/kubeadm/master/secondary
-  - name: drain superfluous nodes
-    loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
-    command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+# - name: configure kubernetes non-master nodes
+#   hosts: _kubernetes_nodes_:!_kubernetes_masters_
+#   roles:
+#   - role: kubernetes/kubeadm/node
-- name: try to clean superfluous nodes
-  hosts: _kubernetes_nodes_remove_
-  roles:
-  - role: kubernetes/kubeadm/reset
-  - role: kubernetes/net/kubeguard
-    vars:
-      kubeguard_remove_node: yes
-
-- name: remove node from api server
-  hosts: _kubernetes_masters_
-  tasks:
-  - name: remove superfluous nodes
-    loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
-    command: "kubectl delete node {{ item }}"
-
-  - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
-    when: kube_bootstrap_token != ""
-    pause:
-      seconds: 42
-
-  - name: remove bootstrap-token
-    when: kube_bootstrap_token != ""
-    command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
+########
+# - name: check for nodes to be removed
+#   hosts: _kubernetes_primary_master_
+#   tasks:
+#   - name: fetch list of current nodes
+#     command: kubectl get nodes -o name
+#     changed_when: False
+#     check_mode: no
+#     register: kubectl_node_list
+
+#   - name: generate list of nodes to be removed
+#     loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
+#     add_host:
+#       name: "{{ item }}"
+#       inventory_dir: "{{ inventory_dir }}"
+#       group: _kubernetes_nodes_remove_
+#     changed_when: False
+
+#   - name: drain superfluous nodes
+#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+#     command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+
+# - name: try to clean superfluous nodes
+#   hosts: _kubernetes_nodes_remove_
+#   roles:
+#   - role: kubernetes/kubeadm/reset
+#   - role: kubernetes/net/kubeguard
+#     when: kubernetes.network_plugin == 'kubeguard'
+#     vars:
+#       kubeguard_remove_node: yes
+
+# - name: remove node from api server
+#   hosts: _kubernetes_primary_master_
+#   tasks:
+#   - name: remove superfluous nodes
+#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+#     command: "kubectl delete node {{ item }}"
+
+#   - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
+#     when: kube_bootstrap_token != ""
+#     pause:
+#       seconds: 42
+
+#   - name: remove bootstrap-token
+#     when: kube_bootstrap_token != ""
+#     command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
 ### TODO: add node labels (i.e. for ingress daemonset)
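For the trailing TODO, one possible shape for a labeling play, assuming a hypothetical `kubernetes_node_labels` dict (hostname -> list of key=value labels) that does not exist anywhere in this repository yet:

- name: add node labels
  hosts: _kubernetes_primary_master_
  tasks:
  - name: apply configured node labels
    # kubernetes_node_labels is a hypothetical variable, sketched here only
    # to illustrate the TODO; dict2items turns {host: [labels]} into loop items
    loop: "{{ kubernetes_node_labels | default({}) | dict2items }}"
    command: "kubectl label node {{ item.key }} {{ item.value | join(' ') }} --overwrite"

With, for example, `kubernetes_node_labels: {k8s-node0: ['node-role.kubernetes.io/ingress=']}`, this would mark a single node so an ingress daemonset could target it via a nodeSelector.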