---
- name: create host groups
  hosts: localhost
  gather_facts: no
  tasks:
  - name: sanity check - fail if masters are not included in nodes
    assert:
      msg: "kubernetes_cluster_layout.nodes must include all nodes (master and non-master)"
      that: kubernetes_cluster_layout.masters | difference(kubernetes_cluster_layout.nodes) | length == 0

  - name: sanity check - fail if primary master is not in masters
    when: kubernetes_cluster_layout.primary_master is defined
    assert:
      msg: "kubernetes_cluster_layout.masters must include kubernetes_cluster_layout.primary_master"
      that: kubernetes_cluster_layout.primary_master in kubernetes_cluster_layout.masters

  - name: sanity check - fail on multiple masters if no primary master is configured
    assert:
      msg: "For multiple masters to work you need to define kubernetes_cluster_layout.primary_master"
      that: (kubernetes_cluster_layout.masters | length) == 1 or kubernetes_cluster_layout.primary_master is defined

  - name: create group for all kubernetes nodes
    loop: "{{ kubernetes_cluster_layout.nodes }}"
    add_host:
      name: "{{ item }}"
      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
      group: _kubernetes_nodes_
    changed_when: False

  - name: create group for kubernetes master nodes
    loop: "{{ kubernetes_cluster_layout.masters }}"
    add_host:
      name: "{{ item }}"
      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
      group: _kubernetes_masters_
    changed_when: False

  - name: create group for kubernetes primary master
    add_host:
      name: "{{ kubernetes_cluster_layout.primary_master | default(kubernetes_cluster_layout.masters[0]) }}"
      inventory_dir: "{{ hostvars[kubernetes_cluster_layout.masters[0]].inventory_dir }}"
      group: _kubernetes_primary_master_
    changed_when: False

- name: prepare variables and do some sanity checks
  hosts: _kubernetes_nodes_
  gather_facts: no
  run_once: yes
  tasks:
  - name: sanity checks for kubeguard
    when: kubernetes_network_plugin == 'kubeguard'
    block:
    - name: check whether every node has a node_index assigned
      assert:
        msg: "There are nodes without an assigned node_index: {{ groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | join(', ') }}"
        that: groups['_kubernetes_nodes_'] | difference(kubeguard.node_index.keys()) | length == 0

    - name: check whether node indices are unique
      assert:
        msg: "There are duplicate entries in the node_index table, every node_index is only allowed once"
        that: (kubeguard.node_index.keys() | length) == (kubeguard.node_index.values() | unique | length)

    - name: check whether node indices are all > 0
      assert:
        msg: "At least one node_index is < 1 (indices start at 1)"
        that: (kubeguard.node_index.values() | min) > 0

  - name: make sure the kubernetes_cri_socket variable is configured correctly
    when: kubernetes_container_runtime == 'containerd'
    assert:
      msg: "The variable kubernetes_cri_socket is not configured correctly for use with containerd!"
      that:
      - kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"

########

- name: kubernetes base installation
  hosts: _kubernetes_nodes_
  roles:
  - role: kubernetes/net/kubeguard
    when: kubernetes_network_plugin == 'kubeguard'
  - role: kubernetes/base
  - role: kubernetes/kubeadm/base

# - name: configure kubernetes primary master
#   hosts: _kubernetes_primary_master_
#   roles:
#   - role: kubernetes/kubeadm/master/common
#   - role: kubernetes/kubeadm/master/primary

# - name: configure kubernetes secondary masters
#   hosts: _kubernetes_masters_:!_kubernetes_primary_master_
#   roles:
#   - role: kubernetes/kubeadm/master/common
#   - role: kubernetes/kubeadm/master/secondary

# - name: configure kubernetes non-master nodes
#   hosts: _kubernetes_nodes_:!_kubernetes_masters_
#   roles:
#   - role: kubernetes/kubeadm/node

########

# - name: check for nodes to be removed
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: fetch list of current nodes
#     command: kubectl get nodes -o name
#     changed_when: False
#     check_mode: no
#     register: kubectl_node_list

#   - name: generate list of nodes to be removed
#     loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(kubernetes_nodes) }}"
#     add_host:
#       name: "{{ item }}"
#       inventory_dir: "{{ inventory_dir }}"
#       group: _kubernetes_nodes_remove_
#     changed_when: False

#   - name: drain superfluous nodes
#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
#     command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"

# - name: try to clean superfluous nodes
#   hosts: _kubernetes_nodes_remove_
#   roles:
#   - role: kubernetes/kubeadm/reset
#   - role: kubernetes/net/kubeguard
#     when: kubernetes_network_plugin == 'kubeguard'
#     vars:
#       kubeguard_action: remove

# - name: remove nodes from api server
#   hosts: _kubernetes_primary_master_
#   tasks:
#   - name: remove superfluous nodes
#     loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
#     command: "kubectl delete node {{ item }}"

#   - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
#     when: kube_bootstrap_token != ""
#     pause:
#       seconds: 42

#   - name: remove bootstrap-token
#     when: kube_bootstrap_token != ""
#     command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"

### TODO: add node labels (i.e. for ingress daemonset)
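
# For reference, the plays above expect variables shaped roughly like the
# sketch below. This is a hypothetical example only: the host names
# (k8s-master0, k8s-node0, ...) and the concrete node_index values are
# illustrative and not defined anywhere in this playbook.
#
# kubernetes_cluster_layout:
#   nodes:
#   - k8s-master0
#   - k8s-master1
#   - k8s-node0
#   masters:
#   - k8s-master0
#   - k8s-master1
#   primary_master: k8s-master0
#
# kubeguard:            ## only needed when kubernetes_network_plugin == 'kubeguard'
#   node_index:         ## must cover every node, values unique and >= 1
#     k8s-master0: 1
#     k8s-master1: 2
#     k8s-node0: 3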