author    Christian Pointner <equinox@spreadspace.org>  2020-02-01 02:48:06 +0100
committer Christian Pointner <equinox@spreadspace.org>  2020-02-01 02:48:06 +0100
commit    844bc9826f652d6bebe55c66e44eb69bd89575bf (patch)
tree      5c60fe1648e5a1db4b54c4ce02e72c4cf0bd8939 /common
parent    updated all nextcloud instances on sk-cloudia (diff)
parent    kubernetes standalone with docker (diff)
Merge branch 'topic/kubernetes-ng'
Diffstat (limited to 'common')
-rw-r--r--  common/kubernetes-cleanup.yml        |  37
-rw-r--r--  common/kubernetes-cluster-layout.yml |  46
-rw-r--r--  common/kubernetes.yml                | 116
3 files changed, 121 insertions, 78 deletions
diff --git a/common/kubernetes-cleanup.yml b/common/kubernetes-cleanup.yml
new file mode 100644
index 00000000..be55d11e
--- /dev/null
+++ b/common/kubernetes-cleanup.yml
@@ -0,0 +1,37 @@
+---
+- name: check for nodes to be removed
+ hosts: _kubernetes_primary_master_
+ tasks:
+ - name: fetch list of current nodes
+ command: kubectl get nodes -o name
+ changed_when: False
+ check_mode: no
+ register: kubectl_node_list
+
+ - name: generate list of nodes to be removed
+ loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(groups['_kubernetes_nodes_']) }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[item].inventory_dir }}"
+ group: _kubernetes_nodes_remove_
+ changed_when: False
+
+ - name: drain superfluous nodes
+ loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+ command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
+
+- name: try to clean superfluous nodes
+ hosts: _kubernetes_nodes_remove_
+ roles:
+ - role: kubernetes/kubeadm/reset
+ - role: kubernetes/net/kubeguard
+ when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard'
+ vars:
+ kubeguard_action: remove
+
+- name: remove node from api server
+ hosts: _kubernetes_primary_master_
+ tasks:
+ - name: remove superfluous nodes
+ loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+ command: "kubectl delete node {{ item }}"
diff --git a/common/kubernetes-cluster-layout.yml b/common/kubernetes-cluster-layout.yml
new file mode 100644
index 00000000..64856fc5
--- /dev/null
+++ b/common/kubernetes-cluster-layout.yml
@@ -0,0 +1,46 @@
+---
+- name: create host groups for kubernetes cluster
+ hosts: "{{ kubernetes_cluster_layout.nodes_group }}"
+ connection: local
+ gather_facts: no
+ run_once: yes
+ tasks:
+ - name: sanity check - fail if masters are not included in nodes
+ assert:
+ msg: "the cluster node group '{{ kubernetes_cluster_layout.nodes_group }}' must include *all* nodes (master and non-master)"
+ that: kubernetes_cluster_layout.masters | difference(ansible_play_hosts_all) | length == 0
+
+ - name: sanity check - fail if primary master is not in masters
+ assert:
+ msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
+ that: kubernetes_cluster_layout.primary_master is undefined or kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters
+
+ - name: sanity check - fail on multiple masters if no primary master is configured
+ assert:
+ msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
+ that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
+
+ - name: create group for all kubernetes nodes
+ loop: "{{ ansible_play_hosts_all }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[item].inventory_dir }}"
+ group: _kubernetes_nodes_
+ changed_when: False
+
+ - name: create group for kubernetes master nodes
+ loop: "{{ kubernetes_cluster_layout.masters }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[item].inventory_dir }}"
+ group: _kubernetes_masters_
+ changed_when: False
+
+ - name: create group for kubernetes primary master
+ vars:
+ item: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
+ add_host:
+ name: "{{ item }}"
+ inventory_dir: "{{ hostvars[item].inventory_dir }}"
+ group: _kubernetes_primary_master_
+ changed_when: False
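The layout play only reads three keys from kubernetes_cluster_layout: nodes_group, masters and (optional, but required for multi-master clusters) primary_master. A minimal sketch of a matching inventory variable; all host and group names below are placeholders, not values from this repository:

# hypothetical group_vars entry consumed by the play above
kubernetes_cluster_layout:
  nodes_group: k8s-example-cluster      # must contain *all* nodes, masters included
  masters:
    - master0.example.org
    - master1.example.org
  primary_master: master0.example.org   # mandatory once masters has more than one entry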
diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index 311f3ebd..d5b58767 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -4,46 +4,52 @@
gather_facts: no
run_once: yes
tasks:
- - name: check if master group contains only one node
- fail:
- msg: "There must be exactly one master node defined"
- failed_when: (groups['_kubernetes_masters_'] | length) != 1
-
- - name: setup variables
- set_fact:
- kubernetes_nodes: "{{ groups['_kubernetes_nodes_'] }}"
- kubernetes_master: "{{ groups['_kubernetes_masters_'] | first }}"
-
- - name: check whether every node has a net_index assigned
- fail:
- msg: "There are nodes without an assigned net-index: {{ kubernetes_nodes | difference(kubernetes.net_index.keys()) | join(', ') }}"
- failed_when: kubernetes_nodes | difference(kubernetes.net_index.keys()) | length > 0
-
- - name: check whether net indices are unique
- fail:
- msg: "There are duplicate entries in the net_index table, every net-index is only allowed once"
- failed_when: (kubernetes.net_index.keys() | length) != (kubernetes.net_index.values() | unique | length)
-
- - name: check whether net indices are all > 0
- fail:
- msg: "At least one net-index is < 1 (indices start at 1)"
- failed_when: (kubernetes.net_index.values() | min) < 1
-
- - name: disable bridge and iptables in docker daemon config
- set_fact:
- docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+ - name: sanity checks for kubeguard
+ when: kubernetes_network_plugin == 'kubeguard'
+ block:
+ - name: check whether every node has a node_index assigned
+ assert:
+ msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
+ that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0
+
+ - name: check whether node indices are unique
+ assert:
+ msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
+ that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)
+
+ - name: check whether node indices are all > 0
+ assert:
+ msg: "At least one node_index is < 1 (indices start at 1)"
+ that: (kubeguard.node_index.values() | min) > 0
+
+ - name: check whether the overlay node ip is configured
+ assert:
+ msg: "For kubeguard to work you need to configure kubernetes_overlay_node_ip"
+ that: kubernetes_overlay_node_ip is defined
+
+ - name: make sure the kubernetes_cri_socket variable is configured correctly
+ when: kubernetes_container_runtime == 'containerd'
+ assert:
+ msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
+ that:
+ - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"
########
-- name: install kubernetes and overlay network
+- name: kubernetes base installation
hosts: _kubernetes_nodes_
roles:
- - role: docker
- role: kubernetes/net/kubeguard
+ when: kubernetes_network_plugin == 'kubeguard'
- role: kubernetes/base
- role: kubernetes/kubeadm/base
-- name: configure kubernetes master
- hosts: _kubernetes_masters_
+- name: configure kubernetes primary master
+ hosts: _kubernetes_primary_master_
+ roles:
+ - role: kubernetes/kubeadm/master
+
+- name: configure kubernetes secondary masters
+ hosts: _kubernetes_masters_:!_kubernetes_primary_master_
roles:
- role: kubernetes/kubeadm/master
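Taken together, the sanity checks in this hunk describe the variable shape a kubeguard/containerd host is expected to carry. A sketch of inventory vars that would pass all of them; only the variable names and the containerd socket path are taken from the checks above, every host name and address is invented:

# hypothetical inventory vars satisfying the asserts in this hunk
kubernetes_network_plugin: kubeguard
kubernetes_overlay_node_ip: 192.0.2.11   # placeholder per-host overlay address
kubeguard:
  node_index:                            # one unique index per node, starting at 1
    node0.example.org: 1
    node1.example.org: 2
kubernetes_container_runtime: containerd
kubernetes_cri_socket: 'unix:///run/containerd/containerd.sock'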
@@ -52,50 +58,4 @@
roles:
- role: kubernetes/kubeadm/node
-########
-- name: check for nodes to be removed
- hosts: _kubernetes_masters_
- tasks:
- - name: fetch list of current nodes
- command: kubectl get nodes -o name
- changed_when: False
- check_mode: no
- register: kubectl_node_list
-
- - name: generate list of nodes to be removed
- loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
- add_host:
- name: "{{ item }}"
- inventory_dir: "{{ inventory_dir }}"
- group: _kubernetes_nodes_remove_
- changed_when: False
-
- - name: drain superfluous nodes
- loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
- command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
-
-- name: try to clean superfluous nodes
- hosts: _kubernetes_nodes_remove_
- roles:
- - role: kubernetes/kubeadm/reset
- - role: kubernetes/net/kubeguard
- vars:
- kubeguard_remove_node: yes
-
-- name: remove node from api server
- hosts: _kubernetes_masters_
- tasks:
- - name: remove superfluous nodes
- loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
- command: "kubectl delete node {{ item }}"
-
- - name: wait a little before removing the bootstrap-token so new nodes have time to generate certificates for themselves
- when: kube_bootstrap_token != ""
- pause:
- seconds: 42
-
- - name: remove bootstrap-token
- when: kube_bootstrap_token != ""
- command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"
-
### TODO: add node labels (i.e. for ingress daemonset)
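One possible shape for that TODO, sketched as an additional play; the _kubernetes_ingress_nodes_ group and the label key are invented here purely for illustration:

# hypothetical follow-up play for the TODO above
- name: label ingress nodes
  hosts: _kubernetes_primary_master_
  tasks:
    - name: add ingress label to designated nodes
      loop: "{{ groups['_kubernetes_ingress_nodes_'] | default([]) }}"
      command: "kubectl label node {{ item }} node-role.kubernetes.io/ingress= --overwrite"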