summaryrefslogtreecommitdiff
path: root/common/kubernetes.yml
blob: adcaf5e767d04191ede6abdda226a9da3ade1a1d (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
---
# Run the preparation playbook first (sets up groups/vars the plays below rely on).
- import_playbook: kubernetes-prepare.yml

# Validate cluster configuration before touching any node.
- name: prepare variables and do some sanity checks
  hosts: _kubernetes_nodes_
  # Pure variable checks — no facts needed, so skip gathering for speed.
  gather_facts: false
  # The checks are cluster-wide, not per-host; evaluating them once is enough.
  run_once: true
  tasks:
  - name: sanity checks for kubeguard
    when: kubernetes_network_plugin == 'kubeguard'
    block:
    # Every node in the play group must appear in kubeguard.node_index.
    - name: check whether every node has a node_index assigned
      assert:
        msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
        that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0

    # Duplicate index values would give two nodes the same overlay subnet.
    - name: check whether node indizes are unique
      assert:
        msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
        that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)

    - name: check whether node indizes are all > 0
      assert:
        msg: "At least one node_index is < 1 (indizes start at 1)"
        that: (kubeguard.node_index.values() | min) > 0

  # containerd exposes its CRI endpoint at a fixed socket path; anything else
  # would make kubeadm/kubelet fail to reach the runtime.
  - name: make sure the kubernetes_cri_socket variable is configured correctly
    when: kubernetes_container_runtime == 'containerd'
    assert:
      msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
      that:
      - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"


########
# Install the common base stack on every node; role order matters
# (network plugin prerequisites, then base packages, then kubeadm).
- name: kubernetes base installation
  hosts: _kubernetes_nodes_
  roles:
  # Only deployed when kubeguard is the selected network plugin.
  - role: kubernetes/net/kubeguard
    when: kubernetes_network_plugin == 'kubeguard'
  - role: kubernetes/base
  - role: kubernetes/kubeadm/base

# Bootstrap the first control-plane node; secondary masters and workers
# would join afterwards (those plays are currently commented out below).
- name: configure kubernetes primary master
  hosts: _kubernetes_primary_master_
  roles:
  - role: kubernetes/kubeadm/master

# - name: configure kubernetes secondary masters
#   hosts: _kubernetes_masters_:!_kubernetes_primary_master_
#   roles:
#   - role: kubernetes/kubeadm/master

# - name: configure kubernetes non-master nodes
#   hosts: _kubernetes_nodes_:!_kubernetes_masters_
#   roles:
#   - role: kubernetes/kubeadm/node

########
# - name: check for nodes to be removed
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: fetch list of current nodes
#     command: kubectl get nodes -o name
#     changed_when: False
#     check_mode: no
#     register: kubectl_node_list

#   - name: generate list of nodes to be removed
#     loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
#     add_host:
#       name: "{{ item }}"
#       inventory_dir: "{{ inventory_dir }}"
#       group: _kubernetes_nodes_remove_
#     changed_when: False

#   - name: drain superfluous nodes
#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
#     command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"

# - name: try to clean superfluous nodes
#   hosts: _kubernetes_nodes_remove_
#   roles:
#   - role: kubernetes/kubeadm/reset
#   - role: kubernetes/net/kubeguard
#     when: kubernetes_network_plugin == 'kubeguard'
#     vars:
#       kubeguard_action: remove

# - name: remove node from api server
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: remove superfluous nodes
#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
#     command: "kubectl delete node {{ item }}"

#   - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
#     when: kube_bootstrap_token != ""
#     pause:
#       seconds: 42

#   - name: remove bootstrap-token
#     when: kube_bootstrap_token != ""
#     command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"

### TODO: add node labels (i.e. for ingress daemonset)