---
- import_playbook: kubernetes-prepare.yml

- name: prepare variables and do some sanity checks
  hosts: _kubernetes_nodes_
  gather_facts: no
  run_once: yes
  tasks:
  - name: sanity checks for kubeguard
    when: kubernetes_network_plugin == 'kubeguard'
    block:
    - name: check whether every node has a node_index assigned
      assert:
        msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
        that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0

    - name: check whether node indices are unique
      assert:
        msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
        that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)

    - name: check whether node indices are all > 0
      assert:
        msg: "At least one node_index is < 1 (indices start at 1)"
        that: (kubeguard.node_index.values() | min) > 0

  - name: make sure the kubernetes_cri_socket variable is configured correctly
    when: kubernetes_container_runtime == 'containerd'
    assert:
      msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
      that:
      - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"

########

- name: kubernetes base installation
  hosts: _kubernetes_nodes_
  roles:
  - role: kubernetes/net/kubeguard
    when: kubernetes_network_plugin == 'kubeguard'
  - role: kubernetes/base
  - role: kubernetes/kubeadm/base

- name: configure kubernetes primary master
  hosts: _kubernetes_primary_master_
  roles:
  - role: kubernetes/kubeadm/master

# - name: configure kubernetes secondary masters
#   hosts: _kubernetes_masters_:!_kubernetes_primary_master_
#   roles:
#   - role: kubernetes/kubeadm/master

# - name: configure kubernetes non-master nodes
#   hosts: _kubernetes_nodes_:!_kubernetes_masters_
#   roles:
#   - role: kubernetes/kubeadm/node

########

# - name: check for nodes to be removed
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: fetch list of current nodes
#     command: kubectl get nodes -o name
#     changed_when: False
#     check_mode: no
#     register: kubectl_node_list
#
#   - name: generate list of nodes to be removed
#     loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
#     add_host:
#       name: "{{ item }}"
#       inventory_dir: "{{ inventory_dir }}"
#       group: _kubernetes_nodes_remove_
#     changed_when: False
#
#   - name: drain superfluous nodes
#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
#     command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"

# - name: try to clean superfluous nodes
#   hosts: _kubernetes_nodes_remove_
#   roles:
#   - role: kubernetes/kubeadm/reset
#   - role: kubernetes/net/kubeguard
#     when: kubernetes_network_plugin == 'kubeguard'
#     vars:
#       kubeguard_action: remove

# - name: remove node from api server
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: remove superfluous nodes
#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
#     command: "kubectl delete node {{ item }}"
#
#   - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
#     when: kube_bootstrap_token != ""
#     pause:
#       seconds: 42
#
#   - name: remove bootstrap-token
#     when: kube_bootstrap_token != ""
#     command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"

### TODO: add node labels (e.g. for ingress daemonset)
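## The commented play below is not part of this playbook; it is only a minimal sketch of
## one way the TODO above could be addressed, assuming a hypothetical dictionary variable
## `kubernetes_node_labels` that maps node names to a list of `key=value` label strings.
## It would run on the primary master once all nodes have joined the cluster.
# - name: apply node labels
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: label nodes (e.g. so an ingress daemonset can select them)
#     loop: "{{ kubernetes_node_labels | default({}) | dict2items }}"
#     command: "kubectl label node {{ item.key }} {{ item.value | join(' ') }} --overwrite"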