---
- name: create host groups
  hosts: localhost
  gather_facts: no
  tasks:
  - name: sanity check - fail if masters are not included in nodes
    assert:
      msg: "kubernetes_cluster_layout.nodes must include all nodes (master and non-master)"
      that: kubernetes_cluster_layout.masters | difference(kubernetes_cluster_layout.nodes) | length == 0

  - name: sanity check - fail if primary master is not in masters
    when: kubernetes_cluster_layout.primary_master is defined
    assert:
      msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
      that: kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters

  - name: sanity check - fail on multiple masters if no primary master is configured
    assert:
      msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
      that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined
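
  # The three checks above imply a cluster layout variable shaped roughly like
  # this (hostnames are placeholders, normally defined in the inventory):
  #
  # kubernetes_cluster_layout:
  #   nodes:                    # every cluster member, masters included
  #   - master0
  #   - worker0
  #   - worker1
  #   masters:
  #   - master0
  #   primary_master: master0   # optional here; required once there is more than one master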

  - name: create group for all kubernetes nodes
    loop: "{{ kubernetes_cluster_layout.nodes }}"
    add_host:
      name: "{{ item }}"
      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
      group: _kubernetes_nodes_
    changed_when: False

  - name: create group for kubernetes master nodes
    loop: "{{ kubernetes_cluster_layout.masters }}"
    add_host:
      name: "{{ item }}"
      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
      group: _kubernetes_masters_
    changed_when: False

  - name: create group for kubernetes primary master
    add_host:
      name: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
      group: _kubernetes_primary_master_
    changed_when: False

- name: prepare variables and do some sanity checks
  hosts: _kubernetes_nodes_
  gather_facts: no
  run_once: yes
  tasks:
  - name: sanity checks for kubeguard
    when: kubernetes.network_plugin == 'kubeguard'
    block:
    - name: check whether every node has a node_index assigned
      assert:
        msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
        that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0

    - name: check whether node indices are unique
      assert:
        msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
        that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)

    - name: check whether node indices are all > 0
      assert:
        msg: "At least one node_index is < 1 (indices start at 1)"
        that: (kubeguard.node_index.values() | min) > 0
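
    # Example kubeguard variable that satisfies the three checks above
    # (node names are placeholders): every node gets a unique index >= 1.
    #
    # kubeguard:
    #   node_index:
    #     master0: 1
    #     worker0: 2
    #     worker1: 3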

  - name: make sure the kubernetes_cri_socket variable is configured correctly
    when: kubernetes.container_runtime == 'containerd'
    assert:
      msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
      that:
      - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"
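      # i.e. inventories running containerd are expected to carry
      # kubernetes_cri_socket: "unix:///run/containerd/containerd.sock"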


########
- name: kubernetes base installation
  hosts: _kubernetes_nodes_
  roles:
  - role: kubernetes/net/kubeguard
    when: kubernetes.network_plugin == 'kubeguard'
  - role: kubernetes/base
  - role: kubernetes/kubeadm/base

# - name: configure kubernetes primary master
#   hosts: _kubernetes_primary_master_
#   roles:
#   - role: kubernetes/kubeadm/master/common
#   - role: kubernetes/kubeadm/master/primary

# - name: configure kubernetes secondary masters
#   hosts: _kubernetes_masters_:!_kubernetes_primary_master_
#   roles:
#   - role: kubernetes/kubeadm/master/common
#   - role: kubernetes/kubeadm/master/secondary

# - name: configure kubernetes non-master nodes
#   hosts: _kubernetes_nodes_:!_kubernetes_masters_
#   roles:
#   - role: kubernetes/kubeadm/node

########
# - name: check for nodes to be removed
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: fetch list of current nodes
#     command: kubectl get nodes -o name
#     changed_when: False
#     check_mode: no
#     register: kubectl_node_list

#   - name: generate list of nodes to be removed
#     loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
#     add_host:
#       name: "{{ item }}"
#       inventory_dir: "{{ inventory_dir }}"
#       group: _kubernetes_nodes_remove_
#     changed_when: False

#   - name: drain superfluous nodes
#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
#     command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"

# - name: try to clean superfluous nodes
#   hosts: _kubernetes_nodes_remove_
#   roles:
#   - role: kubernetes/kubeadm/reset
#   - role: kubernetes/net/kubeguard
#     when: kubernetes.network_plugin == 'kubeguard'
#     vars:
#       kubeguard_remove_node: yes

# - name: remove node from api server
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: remove superfluous nodes
#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
#     command: "kubectl delete node {{ item }}"

#   - name: wait a little before removing the bootstrap-token so new nodes have time to generate certificates for themselves
#     when: kube_bootstrap_token != ""
#     pause:
#       seconds: 42

#   - name: remove bootstrap-token
#     when: kube_bootstrap_token != ""
#     command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"

### TODO: add node labels (i.e. for ingress daemonset)
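
# A possible follow-up for the TODO above, sketched as a commented play like the
# ones before it. The group name "_kubernetes_ingress_" and the label key are
# placeholders, not defined anywhere in this playbook:
#
# - name: label ingress nodes
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: add ingress label to the designated nodes
#     loop: "{{ groups['_kubernetes_ingress_'] | default([]) }}"
#     command: "kubectl label node {{ item }} node-role.kubernetes.io/ingress= --overwrite"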