---
# Sanity-check the inventory once (run_once on the node group) before any play
# mutates the cluster: exactly one master, and a complete, unique, 1-based
# net_index table.
- name: prepare variables and do some sanity checks
  hosts: _kubernetes_nodes_
  gather_facts: false
  run_once: true
  tasks:
    - name: check if master group contains only one node
      fail:
        msg: "There must be exactly one master node defined"
      failed_when: (groups['_kubernetes_masters_'] | length) != 1

    - name: setup variables
      set_fact:
        kubernetes_nodes: "{{ groups['_kubernetes_nodes_'] }}"
        kubernetes_master: "{{ groups['_kubernetes_masters_'] | first }}"

    - name: check whether every node has a net_index assigned
      fail:
        msg: "There are nodes without an assigned net-index: {{ kubernetes_nodes | difference(kubernetes.net_index.keys()) | join(', ') }}"
      failed_when: kubernetes_nodes | difference(kubernetes.net_index.keys()) | length > 0

    - name: check whether net indices are unique
      fail:
        msg: "There are duplicate entries in the net_index table, every net-index is only allowed once"
      failed_when: (kubernetes.net_index.keys() | length) != (kubernetes.net_index.values() | unique | length)

    - name: check whether net indices are all > 0
      fail:
        msg: "At least one net-index is < 1 (indices start at 1)"
      failed_when: (kubernetes.net_index.values() | min) < 1

########

- name: install kubernetes and overlay network
  hosts: _kubernetes_nodes_
  roles:
    ## Since `base` has a dependency for docker it would install and start the daemon
    ## without the docker daemon config file generated by `net`.
    ## This means that the docker daemon will create a bridge and install iptables rules
    ## upon first startup (the first time this playbook runs on a specific host).
    ## Since it is a tedious task to remove the interface and the firewall rules it is much
    ## easier to just run `net` before `base` as `net` does not need anything from `base`.
    - role: kubernetes/net
    - role: kubernetes/base

- name: configure kubernetes master
  hosts: _kubernetes_masters_
  roles:
    - role: kubernetes/master

# Every node that is not the master gets the worker-node role.
- name: configure kubernetes non-master nodes
  hosts: _kubernetes_nodes_:!_kubernetes_masters_
  roles:
    - role: kubernetes/node

########

# Nodes known to the API server but no longer present in the inventory are
# collected into the in-memory group _kubernetes_nodes_remove_ and drained.
- name: check for nodes to be removed
  hosts: _kubernetes_masters_
  tasks:
    - name: fetch list of current nodes
      command: kubectl get nodes -o name
      changed_when: false
      check_mode: false  # must really run even in --check so the register below is populated
      register: kubectl_node_list

    - name: generate list of nodes to be removed
      with_items: "{{ kubectl_node_list.stdout_lines | map('replace', 'nodes/', '') | list | difference(kubernetes_nodes) }}"
      add_host:
        name: "{{ item }}"
        inventory_dir: "{{ inventory_dir }}"
        group: _kubernetes_nodes_remove_
      changed_when: false

    - name: drain superfluous nodes
      with_items: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
      command: "kubectl drain {{ item }} --delete-local-data --force --ignore-daemonsets"

# Re-run the node/net roles on the drained hosts in "remove" mode to undo
# their local configuration.
- name: try to clean superfluous nodes
  hosts: _kubernetes_nodes_remove_
  vars:
    kubernetes_remove_node: true
  roles:
    - role: kubernetes/node
    - role: kubernetes/net

- name: remove node from api server
  hosts: _kubernetes_masters_
  tasks:
    - name: remove superfluous nodes
      with_items: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
      command: "kubectl delete node {{ item }}"

    - name: wait a little before removing bootstrap-token so new nodes have time to generate certificates for themselves
      when: kube_bootstrap_token != ""
      pause:
        seconds: 42

    - name: remove bootstrap-token
      when: kube_bootstrap_token != ""
      command: "kubectl --namespace kube-system delete secret bootstrap-token-{{ kube_bootstrap_token.split('.') | first }}"

### TODO: add node labels (ie. for ingress daemonset)