-rw-r--r--  common/kubernetes-cluster-cleanup.yml                         6
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml   2
-rw-r--r--  roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml        14
-rw-r--r--  roles/kubernetes/net/kubeguard/node/tasks/main.yml           31
4 files changed, 23 insertions, 30 deletions
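The diff below moves the removal of stale kubeguard peer units out of the node role (roles/kubernetes/net/kubeguard/node) into a new dedicated cleanup role, which the cluster-cleanup playbook applies to all nodes whenever the primary master's kubernetes_network_plugin is 'kubeguard'. As a reading aid, here is a minimal, hypothetical inventory sketch of the groups and variable the new play relies on; the group names and kubernetes_network_plugin appear in the diff, while the host names and the placement of the variable are assumptions:

  # illustrative inventory sketch (not part of this change)
  all:
    children:
      _kubernetes_primary_master_:
        hosts:
          master0:                    # assumed host name
      _kubernetes_nodes_:
        hosts:
          master0:
          node1:
      _kubernetes_nodes_remove_:      # nodes queued for removal
        hosts:
          node2:
    vars:
      kubernetes_network_plugin: kubeguard   # assumed to be set cluster-wide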
diff --git a/common/kubernetes-cluster-cleanup.yml b/common/kubernetes-cluster-cleanup.yml
index 83d6945c..d56940ee 100644
--- a/common/kubernetes-cluster-cleanup.yml
+++ b/common/kubernetes-cluster-cleanup.yml
@@ -28,6 +28,12 @@
loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
command: "kubectl delete node {{ item }}"
+- name: cleanup kubeguard connections
+ hosts: _kubernetes_nodes_
+ roles:
+ - role: kubernetes/net/kubeguard/cleanup
+ when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard'
+
- name: try to clean superflous nodes
hosts: _kubernetes_nodes_remove_
roles:
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index 37f108a7..610a8d3f 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -28,7 +28,7 @@
 - name: join kubernetes secondary master node and store log
   block:
   - name: join kubernetes secondary master node
-    throttle: 1 ## TODO test this!
+    throttle: 1
     command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
     args:
       creates: /etc/kubernetes/kubelet.conf
diff --git a/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml b/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml
new file mode 100644
index 00000000..f15058d2
--- /dev/null
+++ b/roles/kubernetes/net/kubeguard/cleanup/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: stop/disable systemd units for stale kubeguard peers
+  loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+  systemd:
+    name: "kubeguard-peer-{{ item }}.service"
+    state: stopped
+    enabled: no
+  failed_when: false
+
+- name: remove systemd units for stale kubeguard peers
+  loop: "{{ groups['_kubernetes_nodes_remove_'] | default([]) }}"
+  file:
+    name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service"
+    state: absent
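Not part of the patch: after the cleanup role has run, one way to verify that no stale peer units remain might be a throw-away play like the sketch below. It reuses the find parameters that the node role used to carry (see the next diff); the play and the registered variable name are assumptions:

  # hypothetical verification play (illustrative only)
  - name: check for leftover kubeguard peer units
    hosts: _kubernetes_nodes_
    tasks:
    - name: find kubeguard peer unit files
      find:
        path: /etc/systemd/system/
        pattern: "kubeguard-peer-*.service"
      register: kubeguard_leftover_units

    - name: report leftover unit files
      debug:
        msg: "{{ kubeguard_leftover_units.files | map(attribute='path') | list }}"

Unit files belonging to hosts in _kubernetes_nodes_remove_ should no longer show up in this list.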
diff --git a/roles/kubernetes/net/kubeguard/node/tasks/main.yml b/roles/kubernetes/net/kubeguard/node/tasks/main.yml
index 0658b42c..72814e06 100644
--- a/roles/kubernetes/net/kubeguard/node/tasks/main.yml
+++ b/roles/kubernetes/net/kubeguard/node/tasks/main.yml
@@ -44,35 +44,8 @@
     state: started
     enabled: yes
 
-- name: get list of currently installed kubeguard peers
-  find:
-    path: /etc/systemd/system/
-    pattern: "kubeguard-peer-*.service"
-  register: kubeguard_peers_installed
-
-- name: compute list of peers to be added
-  set_fact:
-    kubeguard_peers_to_add: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
-
-- name: compute list of peers to be removed
-  set_fact:
-    kubeguard_peers_to_remove: "{{ kubeguard_peers_installed.files | map(attribute='path') | map('replace', '/etc/systemd/system/kubeguard-peer-', '') | map('replace', '.service', '') | difference(kubeguard_peers_to_add) }}"
-
-- name: stop/disable systemd units for stale kubeguard peers
-  loop: "{{ kubeguard_peers_to_remove }}"
-  systemd:
-    name: "kubeguard-peer-{{ item }}.service"
-    state: stopped
-    enabled: no
-
-- name: remove systemd units for stale kubeguard peers
-  loop: "{{ kubeguard_peers_to_remove }}"
-  file:
-    name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service"
-    state: absent
-
 - name: install systemd units for every kubeguard peer
-  loop: "{{ kubeguard_peers_to_add }}"
+  loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
   loop_control:
     loop_var: peer
   template:
@@ -81,7 +54,7 @@
 # TODO: notify restart for peers that change...
 - name: make sure kubeguard peer services are started and enabled
-  loop: "{{ kubeguard_peers_to_add }}"
+  loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
   systemd:
     daemon_reload: yes
     name: "kubeguard-peer-{{ item }}.service"