blob: 1ad583afef5e375228c351c96ab7edc64964429a (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
|
---
# Pre-flight play: runs once, validates the inventory layout before any
# role touches a host, and publishes the node/master facts used below.
- name: prepare variables and do some sanity checks
  hosts: _kubernetes_nodes_
  gather_facts: false
  run_once: true
  tasks:
    # `fail` + `failed_when` is used as an assertion: the task only
    # actually fails when the failed_when expression is true.
    - name: check if master group contains only one node
      fail:
        msg: "There must be exactly one master node defined"
      failed_when: (groups['_kubernetes_masters_'] | length) != 1

    - name: setup variables
      set_fact:
        kubernetes_nodes: "{{ groups['_kubernetes_nodes_'] }}"
        kubernetes_master: "{{ groups['_kubernetes_masters_'] | first }}"

    # Every inventory node must appear as a key in kubernetes.net_index.
    - name: check whether every node has a net_index assigned
      fail:
        msg: "There are nodes without an assigned net-index: {{ kubernetes_nodes | difference(kubernetes.net_index.keys()) | join(', ') }}"
      failed_when: kubernetes_nodes | difference(kubernetes.net_index.keys()) | length > 0

    # If two nodes share a net-index the unique'd value list is shorter
    # than the key list.
    - name: check whether net indices are unique
      fail:
        msg: "There are duplicate entries in the net_index table, every net-index is only allowed once"
      failed_when: (kubernetes.net_index.keys() | length) != (kubernetes.net_index.values() | unique | length)

    - name: check whether net indices are all > 0
      fail:
        msg: "At least one net-index is < 1 (indizes start at 1)"
      failed_when: (kubernetes.net_index.values() | min) < 1
########
# Base installation: every node gets the overlay network first, then the
# common kubernetes base role.
- name: install kubernetes and overlay network
  hosts: _kubernetes_nodes_
  roles:
    ## `base` pulls in docker as a dependency, which would install and start
    ## the daemon before `net` has generated the docker daemon config file.
    ## On the first run against a host the daemon would then create its own
    ## bridge and iptables rules, which are tedious to remove afterwards.
    ## Running `net` before `base` avoids all of that — `net` needs nothing
    ## from `base`, so the order is safe.
    - role: kubernetes/net
    - role: kubernetes/base
# Master-only configuration (exactly one host — enforced by the sanity play).
- name: configure kubernetes master
  hosts: _kubernetes_masters_
  roles:
    - role: kubernetes/master
# Worker configuration: all nodes except the master(s).
- name: configure kubernetes non-master nodes
  hosts: _kubernetes_nodes_:!_kubernetes_masters_
  roles:
    - role: kubernetes/node
########
# Node removal, step 1: compare the nodes known to the API server against
# the inventory and drain everything that is no longer listed.
- name: check for nodes to be removed
  hosts: _kubernetes_masters_
  tasks:
    # Read-only query; never reports "changed" and runs even in check mode
    # so the follow-up tasks have data to work with.
    - name: fetch list of current nodes
      command: kubectl get nodes -o name
      changed_when: false
      check_mode: false
      register: kubectl_node_list

    # `kubectl get nodes -o name` prints "nodes/<name>"; strip the prefix
    # and keep only names NOT present in the inventory-derived
    # kubernetes_nodes fact (set in the sanity play).
    - name: generate list of nodes to be removed
      with_items: "{{ kubectl_node_list.stdout_lines | map('replace', 'nodes/', '') | list | difference(kubernetes_nodes) }}"
      add_host:
        name: "{{ item }}"
        inventory_dir: "{{ inventory_dir }}"
        group: _kubernetes_nodes_remove_
      changed_when: false

    - name: drain superfluous nodes
      with_items: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
      command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"
# Node removal, step 2: run the node/net roles in "remove" mode on the
# drained hosts to undo their local kubernetes setup.
- name: try to clean superfluous nodes
  hosts: _kubernetes_nodes_remove_
  vars:
    kubernetes_remove_node: true
  roles:
    - role: kubernetes/node
    - role: kubernetes/net
# Node removal, step 3: delete the drained nodes from the API server and
# revoke the bootstrap token once the new nodes have had time to use it.
- name: remove node from api server
  hosts: _kubernetes_masters_
  tasks:
    - name: remove superfluous nodes
      with_items: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
      command: "kubectl delete node {{ item }}"

    # Guard with | default('') so the play does not error out when
    # kube_bootstrap_token is undefined (e.g. nothing was bootstrapped).
    - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
      when: kube_bootstrap_token | default('') != ""
      pause:
        seconds: 42

    # The secret name only uses the token-id, i.e. the part before the ".".
    - name: remove bootstrap-token
      when: kube_bootstrap_token | default('') != ""
      command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"

### TODO: add node labels (ie. for ingress daemonset)
|