Diffstat (limited to 'roles/kubernetes/kubeadm')
-rw-r--r--  roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py              | 33
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/main.yml                               | 23
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml                    | 8
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml                      | 95
-rw-r--r--  roles/kubernetes/kubeadm/base/tasks/net_none.yml                           | 7
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2          | 12
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2       | 55
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2 | 12
-rw-r--r--  roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2      | 37
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml                  | 10
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml                    | 13
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/net_none.yml                         | 2
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/primary-master.yml                   | 14
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml                | 8
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 | 237
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 | 171
-rw-r--r--  roles/kubernetes/kubeadm/node/tasks/main.yml                               | 6
-rw-r--r--  roles/kubernetes/kubeadm/prune/tasks/main.yml                              | 9
-rw-r--r--  roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml                   | 2
-rw-r--r--  roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml                     | 14
-rw-r--r--  roles/kubernetes/kubeadm/prune/tasks/net_none.yml                          | 2
-rw-r--r--  roles/kubernetes/kubeadm/reset/handlers/main.yml                           | 4
-rw-r--r--  roles/kubernetes/kubeadm/reset/tasks/main.yml                              | 9
-rw-r--r--  roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml                     | 26
24 files changed, 791 insertions, 18 deletions
diff --git a/roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py b/roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py
new file mode 100644
index 00000000..2220e545
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/filter_plugins/net_kubeguard.py
@@ -0,0 +1,33 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import errors
+
+
+def kubeguard_direct_net_zone(data, myname, peer):
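+    """Return the one direct-net zone shared by myname and peer, or "" if there is none.
+
+    Hypothetical example:
+        data = {'zone0': {'node_interface': {'nodeA': 'eth1', 'nodeB': 'eth1'}}}
+        kubeguard_direct_net_zone(data, 'nodeA', 'nodeB')  # -> 'zone0'
+        kubeguard_direct_net_zone(data, 'nodeA', 'nodeC')  # -> ''
+    """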
+    try:
+        zones = []
+        for zone in data:
+            if myname in data[zone]['node_interface'] and peer in data[zone]['node_interface']:
+                zones.append(zone)
+
+        if not zones:
+            return ""
+        if len(zones) > 1:
+            raise errors.AnsibleFilterError("host '%s' and '%s' have multiple direct net zones in common: %s" %
+                                            (myname, peer, ','.join(zones)))
+        return zones[0]
+
+    except Exception as e:
+        raise errors.AnsibleFilterError("kubeguard_direct_net_zone(): %s" % str(e))
+
+
+class FilterModule(object):
+
+    ''' Kubeguard Network Filters '''
+    filter_map = {
+        'kubeguard_direct_net_zone': kubeguard_direct_net_zone,
+    }
+
+    def filters(self):
+        return self.filter_map
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 2d2bd324..7d882f31 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -3,7 +3,7 @@
   apt:
     name:
       - haproxy
-      - hatop
+      - haproxyctl
       - "kubeadm={{ kubernetes_version }}-00"
       - "kubectl={{ kubernetes_version }}-00"
     state: present
@@ -48,16 +48,13 @@
     state: "{% if haproxy_config is changed %}restarted{% else %}started{% endif %}"
     enabled: yes
-- name: add hatop config for shells
-  loop:
-    - zsh
-    - bash
-  blockinfile:
-    path: "/root/.{{ item }}rc"
-    create: yes
-    marker: "### {mark} ANSIBLE MANAGED BLOCK for hatop ###"
-    content: |
-      alias hatop="hatop -s /var/run/haproxy/admin.sock"
+## loading the module temporarily because kubeadm will complain if it is not there,
+# but I don't think it is necessary to make this persistent; also ignoring changes here
+- name: load module br_netfilter to satisfy kubeadm init/join
+  modprobe:
+    name: br_netfilter
+    state: present
+  changed_when: false
-# - name: prepare network plugin
-#   include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
+- name: prepare network plugin
+  include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml
new file mode 100644
index 00000000..246b20bc
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/tasks/net_kube-router.yml
@@ -0,0 +1,8 @@
+---
+- name: install packages needed for debugging kube-router
+  apt:
+    name:
+      - iptables
+      - ipvsadm
+      - ipset
+    state: present
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..2d706a03
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml
@@ -0,0 +1,95 @@
+---
+- name: make sure kubernetes_network_plugin_replaces_kube_proxy is not set
+  when:
+    - kubernetes_network_plugin_variant != 'with-kube-router'
+  run_once: yes
+  assert:
+    msg: "kubeguard variant '{{ kubernetes_network_plugin_variant }}' cannot replace kube-proxy; please set kubernetes_network_plugin_replaces_kube_proxy to false or configure a different kubernetes_network_plugin_variant."
+    that:
+      - not kubernetes_network_plugin_replaces_kube_proxy
+
+
+- name: install wireguard
+  import_role:
+    name: wireguard/base
+
+- name: create network config directory
+  file:
+    name: /var/lib/kubeguard/
+    state: directory
+
+- name: install ifupdown script
+  template:
+    src: net_kubeguard/ifupdown.sh.j2
+    dest: /var/lib/kubeguard/ifupdown.sh
+    mode: 0755
+  # TODO: notify reload... this is unfortunately already too late because
+  # it probably must be brought down by the old version of the script
+
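+## umask 077 keeps the generated private key readable by root only;
+## 'creates:' makes this shell task idempotent across runs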
+- name: generate wireguard private key
+ shell: "umask 077; wg genkey > /var/lib/kubeguard/kubeguard-wg0.privatekey"
+ args:
+ creates: /var/lib/kubeguard/kubeguard-wg0.privatekey
+
+- name: fetch wireguard public key
+ shell: "wg pubkey < /var/lib/kubeguard/kubeguard-wg0.privatekey"
+ register: kubeguard_wireguard_pubkey
+ changed_when: false
+ check_mode: no
+
+- name: install systemd service unit for network interface
+ template:
+ src: net_kubeguard/interface.service.j2
+ dest: /etc/systemd/system/kubeguard-interface.service
+ # TODO: notify: reload???
+
+- name: make sure kubeguard interface service is started and enabled
+ systemd:
+ daemon_reload: yes
+ name: kubeguard-interface.service
+ state: started
+ enabled: yes
+
+- name: install systemd units for every kubeguard peer
+ loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
+ loop_control:
+ loop_var: peer
+ template:
+ src: net_kubeguard/peer.service.j2
+ dest: "/etc/systemd/system/kubeguard-peer-{{ peer }}.service"
+ # TODO: notify restart for peers that change...
+
+- name: make sure kubeguard peer services are started and enabled
+ loop: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
+ systemd:
+ daemon_reload: yes
+ name: "kubeguard-peer-{{ item }}.service"
+ state: started
+ enabled: yes
+
+- name: enable IPv4 forwarding
+ sysctl:
+ name: net.ipv4.ip_forward
+ value: '1'
+ sysctl_set: yes
+ state: present
+ reload: yes
+
+- name: create cni config directory
+ file:
+ name: /etc/cni/net.d
+ state: directory
+
+- name: install cni config
+ template:
+ src: net_kubeguard/cni.json.j2
+ dest: /etc/cni/net.d/kubeguard.json
+
+- name: install packages needed for debugging kube-router
+ when: kubernetes_network_plugin_variant == 'with-kube-router'
+ apt:
+ name:
+ - iptables
+ - ipvsadm
+ - ipset
+ state: present
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_none.yml b/roles/kubernetes/kubeadm/base/tasks/net_none.yml
new file mode 100644
index 00000000..0924c458
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/tasks/net_none.yml
@@ -0,0 +1,7 @@
+---
+- name: make sure kubernetes_network_plugin_replaces_kube_proxy is not set
+  run_once: yes
+  assert:
+    msg: "this network plugin cannot replace kube-proxy; please set kubernetes_network_plugin_replaces_kube_proxy to false."
+    that:
+      - not kubernetes_network_plugin_replaces_kube_proxy
diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2
new file mode 100644
index 00000000..eb9e3d61
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2
@@ -0,0 +1,12 @@
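+{#- host-local IPAM: each node hands out pod IPs from its own slice of
+    kubernetes.pod_ip_range, selected by the node's kubeguard node_index -#}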
+{
+ "cniVersion": "0.3.1",
+ "name": "kubeguard",
+ "type": "bridge",
+ "bridge": "kubeguard-br0",
+ "isDefaultGateway": true,
+ "hairpinMode": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
+ }
+}
diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2
new file mode 100644
index 00000000..f940d413
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/ifupdown.sh.j2
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+set -e
+
+CONF_D="/var/lib/kubeguard/"
+
+INET_IF="{{ ansible_default_ipv4.interface }}"
+
+POD_NET_CIDR="{{ kubernetes.pod_ip_range }}"
+
+{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%}
+BR_IF="kubeguard-br0"
+BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}"
+BR_IP_CIDR="{{ br_net | ipaddr(1) }}"
+BR_NET_CIDR="{{ br_net }}"
+
+TUN_IF="kubeguard-wg0"
+TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}"
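+# Example (hypothetical values): with kubernetes.pod_ip_range = 10.42.0.0/16,
+# kubernetes.pod_ip_range_size = 24 and node_index = 1, this renders to:
+#   BR_IP_CIDR  = 10.42.1.1/24  (bridge address inside this node's pod subnet)
+#   TUN_IP_CIDR = 10.42.0.1/24  (this node's address on the wireguard transfer net)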
+
+
+case "$1" in
+  up)
+    # bring up bridge for local pods
+    ip link add dev "$BR_IF" type bridge
+    ip addr add dev "$BR_IF" "$BR_IP_CIDR"
+    ip link set up dev "$BR_IF"
+    iptables -t nat -A POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE
+    modprobe br_netfilter
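+    # br_netfilter makes bridged pod traffic traverse iptables
+    # (net.bridge.bridge-nf-call-iptables), which the firewall rules rely on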
+
+    # bring up wireguard tunnel to other nodes
+    ip link add dev "$TUN_IF" type wireguard
+    ip addr add dev "$TUN_IF" "$TUN_IP_CIDR"
+    wg set "$TUN_IF" listen-port {{ kubeguard_wireguard_port | default(51820) }} private-key "$CONF_D/$TUN_IF.privatekey"
+    ip link set up dev "$TUN_IF"
+
+    # make pods and service IPs reachable
+    # !!! use IP of bridge as source so we don't produce martians if direct-zones are involved !!!
+    ip route add "$POD_NET_CIDR" dev "$TUN_IF" src "$BR_IP"
+    ;;
+  down)
+    # bring down wireguard tunnel to other nodes
+    ip route del "$POD_NET_CIDR" dev "$TUN_IF"
+    ip link del dev "$TUN_IF"
+
+    # bring down bridge for local pods
+    iptables -t nat -D POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE
+    ip link del dev "$BR_IF"
+    ;;
+  *)
+    echo "usage: $0 (up|down)"
+    exit 1
+    ;;
+esac
+
+exit 0
diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2
new file mode 100644
index 00000000..35fc8f90
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/interface.service.j2
@@ -0,0 +1,12 @@
+[Unit]
+Description=Kubeguard Network Setup
+After=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/var/lib/kubeguard/ifupdown.sh up
+ExecStop=/var/lib/kubeguard/ifupdown.sh down
+RemainAfterExit=yes
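+# Type=oneshot with RemainAfterExit=yes keeps the unit 'active' after setup,
+# so ExecStop (ifupdown.sh down) runs when the unit is stopped or at shutdown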
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2
new file mode 100644
index 00000000..c9d96a5a
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/peer.service.j2
@@ -0,0 +1,37 @@
+[Unit]
+Description=Kubernetes Network Peer {{ peer }}
+After=network.target
+Requires=kubeguard-interface.service
+After=kubeguard-interface.service
+
+{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%}
+{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
+{% set direct_zone = kubeguard.direct_net_zones | default({}) | kubeguard_direct_net_zone(inventory_hostname, peer) -%}
+{% if direct_zone %}
+{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
+{% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
+{% set direct_ip_peer = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %}
+{% else %}
+{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%}
+{% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
+{% set wg_host = hostvars[peer].external_ip_cooked | default(hostvars[peer].ansible_default_ipv4.address) -%}
+{% set wg_port = hostvars[peer].kubeguard_wireguard_port | default(51820) -%}
+{% set wg_allowedips = (tun_ip | ipaddr('address')) + "/32," + pod_net_peer %}
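+{#- hypothetical example: with pod_ip_range 10.42.0.0/16, size 24 and peer node_index 2,
+    pod_net_peer renders to 10.42.2.0/24 and wg_allowedips to "10.42.0.2/32,10.42.2.0/24",
+    i.e. the peer's transfer-net address plus its pod subnet -#}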
+{% endif %}
+[Service]
+Type=oneshot
+{% if direct_zone %}
+ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }}
+ExecStart=/sbin/ip link set up dev {{ direct_interface }}
+ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }}
+ExecStop=/sbin/ip route del {{ pod_net_peer }}
+ExecStop=/sbin/ip link set down dev {{ direct_interface }}
+ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }}
+{% else %}
+ExecStart=/usr/bin/wg set kubeguard-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10
+ExecStop=/usr/bin/wg set kubeguard-wg0 peer {{ wg_pubkey }} remove
+{% endif %}
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml
new file mode 100644
index 00000000..5368b6f5
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml
@@ -0,0 +1,10 @@
+---
+- name: generate kube-router configuration
+  template:
+    src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2"
+    dest: /etc/kubernetes/network-plugin.yml
+
+- name: install kube-router onto the cluster
+  command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
+  register: kube_router_apply_result
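+  ## kubectl apply prints "<resource> unchanged" for no-op resources; report
+  ## "changed" only when at least one output line lacks that suffix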
+  changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..f364fb5f
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml
@@ -0,0 +1,13 @@
+---
+- name: install kube-router variant
+  when: "kubernetes_network_plugin_variant == 'with-kube-router'"
+  block:
+    - name: generate kubeguard (kube-router) configuration
+      template:
+        src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2"
+        dest: /etc/kubernetes/network-plugin.yml
+
+    - name: install kubeguard (kube-router) onto the cluster
+      command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
+      register: kubeguard_apply_result
+      changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/master/tasks/net_none.yml b/roles/kubernetes/kubeadm/master/tasks/net_none.yml
new file mode 100644
index 00000000..bf1a16d5
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/net_none.yml
@@ -0,0 +1,2 @@
+---
+## this "plugin" is for testing purposes only
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index f24e9ac1..432f7479 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -27,8 +27,8 @@
 - name: initialize kubernetes master and store log
   block:
     - name: initialize kubernetes master
-      command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
-      # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+      command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+      # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
       args:
         creates: /etc/kubernetes/pki/ca.crt
       register: kubeadm_init
@@ -40,6 +40,12 @@
         content: "{{ kubeadm_init.stdout }}\n"
         dest: /etc/kubernetes/kubeadm-init.log
+    - name: dump error output of kubeadm init to log file
+      when: kubeadm_init.changed and kubeadm_init.stderr
+      copy:
+        content: "{{ kubeadm_init.stderr }}\n"
+        dest: /etc/kubernetes/kubeadm-init.errors
+
 - name: create bootstrap token for existing cluster
   command: kubeadm token create --ttl 42m
   check_mode: no
@@ -119,5 +125,5 @@
 ## Network Plugin
-# - name: install network plugin
-#   include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
+- name: install network plugin
+  include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index 31fb31d6..610a8d3f 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -28,7 +28,7 @@
 - name: join kubernetes secondary master node and store log
   block:
     - name: join kubernetes secondary master node
-      throttle: 1 ## TODO test this!
+      throttle: 1
       command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
       args:
         creates: /etc/kubernetes/kubelet.conf
@@ -42,6 +42,12 @@
         content: "{{ kubeadm_join.stdout }}\n"
         dest: /etc/kubernetes/kubeadm-join.log
+    - name: dump error output of kubeadm join to log file
+      when: kubeadm_join.changed and kubeadm_join.stderr
+      copy:
+        content: "{{ kubeadm_join.stderr }}\n"
+        dest: /etc/kubernetes/kubeadm-join.errors
+
 # TODO: actually check if node has registered
 - name: give the new master(s) a moment to register
   when: kubeadm_join is changed
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
new file mode 100644
index 00000000..b06687d5
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
@@ -0,0 +1,237 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-router-kubeconfig
+  namespace: kube-system
+  labels:
+    tier: node
+    k8s-app: kube-router
+data:
+  kubeconfig.conf: |
+    apiVersion: v1
+    kind: Config
+    clusters:
+      - cluster:
+          certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+        name: default
+    contexts:
+      - context:
+          cluster: default
+          namespace: default
+          user: default
+        name: default
+    current-context: default
+    users:
+      - name: default
+        user:
+          tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-router-cfg
+  namespace: kube-system
+  labels:
+    tier: node
+    k8s-app: kube-router
+data:
+  cni-conf.json: |
+    {
+      "cniVersion":"0.3.0",
+      "name":"mynet",
+      "plugins":[
+        {
+          "name":"kubernetes",
+          "type":"bridge",
+          "bridge":"kube-bridge",
+          "isDefaultGateway":true,
+          "hairpinMode": true,
+          "ipam":{
+            "type":"host-local"
+          }
+        },
+        {
+          "type":"portmap",
+          "capabilities":{
+            "snat":true,
+            "portMappings":true
+          }
+        }
+      ]
+    }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: kube-router
+    tier: node
+  name: kube-router
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      k8s-app: kube-router
+      tier: node
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-router
+        tier: node
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "8080"
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccountName: kube-router
+      serviceAccount: kube-router
+      containers:
+        - name: kube-router
+          image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+          imagePullPolicy: Always
+          args:
+            - --run-router=true
+            - --run-firewall=true
+            - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
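+            # renders to "true" when kubernetes_network_plugin_replaces_kube_proxy is
+            # set, i.e. kube-router's IPVS-based service proxy takes over for kube-proxy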
+            - --kubeconfig=/var/lib/kube-router/kubeconfig
+            - --hairpin-mode
+            - --iptables-sync-period=10s
+            - --ipvs-sync-period=10s
+            - --routes-sync-period=10s
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: KUBE_ROUTER_CNI_CONF_FILE
+              value: /etc/cni/net.d/10-kuberouter.conflist
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: 20244
+            initialDelaySeconds: 10
+            periodSeconds: 3
+          resources:
+            requests:
+              cpu: 250m
+              memory: 250Mi
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: lib-modules
+              mountPath: /lib/modules
+              readOnly: true
+            - name: cni-conf-dir
+              mountPath: /etc/cni/net.d
+            - name: kubeconfig
+              mountPath: /var/lib/kube-router
+              readOnly: true
+            - name: xtables-lock
+              mountPath: /run/xtables.lock
+              readOnly: false
+      initContainers:
+        - name: install-cni
+          image: busybox
+          imagePullPolicy: Always
+          command:
+            - /bin/sh
+            - -c
+            - set -e -x;
+              if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
+                if [ -f /etc/cni/net.d/*.conf ]; then
+                  rm -f /etc/cni/net.d/*.conf;
+                fi;
+                TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
+                cp /etc/kube-router/cni-conf.json ${TMP};
+                mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
+              fi
+          volumeMounts:
+            - name: cni-conf-dir
+              mountPath: /etc/cni/net.d
+            - name: kube-router-cfg
+              mountPath: /etc/kube-router
+      hostNetwork: true
+      tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoSchedule
+          key: node.kubernetes.io/not-ready
+          operator: Exists
+      volumes:
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: cni-conf-dir
+          hostPath:
+            path: /etc/cni/net.d
+        - name: kube-router-cfg
+          configMap:
+            name: kube-router-cfg
+        - name: kubeconfig
+          configMap:
+            name: kube-router-kubeconfig
+            items:
+              - key: kubeconfig.conf
+                path: kubeconfig
+        - name: xtables-lock
+          hostPath:
+            path: /run/xtables.lock
+            type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-router
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: kube-router
+  namespace: kube-system
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - namespaces
+      - pods
+      - services
+      - nodes
+      - endpoints
+    verbs:
+      - list
+      - get
+      - watch
+  - apiGroups:
+      - "networking.k8s.io"
+    resources:
+      - networkpolicies
+    verbs:
+      - list
+      - get
+      - watch
+  - apiGroups:
+      - extensions
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: kube-router
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kube-router
+subjects:
+  - kind: ServiceAccount
+    name: kube-router
+    namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
new file mode 100644
index 00000000..51bfdaae
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
@@ -0,0 +1,171 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-router-kubeconfig
+  namespace: kube-system
+  labels:
+    tier: node
+    k8s-app: kube-router
+data:
+  kubeconfig.conf: |
+    apiVersion: v1
+    kind: Config
+    clusters:
+      - cluster:
+          certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+          server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+        name: default
+    contexts:
+      - context:
+          cluster: default
+          namespace: default
+          user: default
+        name: default
+    current-context: default
+    users:
+      - name: default
+        user:
+          tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: kube-router
+    tier: node
+  name: kube-router
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      k8s-app: kube-router
+      tier: node
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-router
+        tier: node
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "8080"
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccountName: kube-router
+      serviceAccount: kube-router
+      containers:
+        - name: kube-router
+          image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+          imagePullPolicy: Always
+          args:
+            - --run-router=false
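+            # kubeguard's wireguard mesh already routes pod traffic between nodes,
+            # so kube-router's BGP router is disabled; it only handles firewall/proxy duty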
+            - --run-firewall=true
+            - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+            - --kubeconfig=/var/lib/kube-router/kubeconfig
+            - --hairpin-mode
+            - --iptables-sync-period=10s
+            - --ipvs-sync-period=10s
+          env:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: 20244
+            initialDelaySeconds: 10
+            periodSeconds: 3
+          resources:
+            requests:
+              cpu: 250m
+              memory: 250Mi
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: lib-modules
+              mountPath: /lib/modules
+              readOnly: true
+            - name: kubeconfig
+              mountPath: /var/lib/kube-router
+              readOnly: true
+            - name: xtables-lock
+              mountPath: /run/xtables.lock
+              readOnly: false
+      hostNetwork: true
+      tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoSchedule
+          key: node.kubernetes.io/not-ready
+          operator: Exists
+      volumes:
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: kubeconfig
+          configMap:
+            name: kube-router-kubeconfig
+            items:
+              - key: kubeconfig.conf
+                path: kubeconfig
+        - name: xtables-lock
+          hostPath:
+            path: /run/xtables.lock
+            type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-router
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: kube-router
+  namespace: kube-system
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - namespaces
+      - pods
+      - services
+      - nodes
+      - endpoints
+    verbs:
+      - list
+      - get
+      - watch
+  - apiGroups:
+      - "networking.k8s.io"
+    resources:
+      - networkpolicies
+    verbs:
+      - list
+      - get
+      - watch
+  - apiGroups:
+      - extensions
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: kube-router
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kube-router
+subjects:
+  - kind: ServiceAccount
+    name: kube-router
+    namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index 655b1b18..6b3d18ae 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -14,3 +14,9 @@
       copy: # noqa 503
         content: "{{ kubeadm_join.stdout }}\n"
         dest: /etc/kubernetes/kubeadm-join.log
+
+    - name: dump error output of kubeadm join to log file
+      when: kubeadm_join.changed and kubeadm_join.stderr
+      copy:
+        content: "{{ kubeadm_join.stderr }}\n"
+        dest: /etc/kubernetes/kubeadm-join.errors
diff --git a/roles/kubernetes/kubeadm/prune/tasks/main.yml b/roles/kubernetes/kubeadm/prune/tasks/main.yml
new file mode 100644
index 00000000..71ed0d04
--- /dev/null
+++ b/roles/kubernetes/kubeadm/prune/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: remove nodes from api server
+  run_once: true
+  delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+  loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
+  command: "kubectl delete node {{ item }}"
+
+- name: prune network plugin
+  include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml
new file mode 100644
index 00000000..94832c38
--- /dev/null
+++ b/roles/kubernetes/kubeadm/prune/tasks/net_kube-router.yml
@@ -0,0 +1,2 @@
+---
+## nothing to do here
diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..8a8c7752
--- /dev/null
+++ b/roles/kubernetes/kubeadm/prune/tasks/net_kubeguard.yml
@@ -0,0 +1,14 @@
+---
+- name: stop/disable systemd units for stale kubeguard peers
+  loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
+  systemd:
+    name: "kubeguard-peer-{{ item }}.service"
+    state: stopped
+    enabled: no
+  failed_when: false
+
+- name: remove systemd units for stale kubeguard peers
+  loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}"
+  file:
+    name: "/etc/systemd/system/kubeguard-peer-{{ item }}.service"
+    state: absent
diff --git a/roles/kubernetes/kubeadm/prune/tasks/net_none.yml b/roles/kubernetes/kubeadm/prune/tasks/net_none.yml
new file mode 100644
index 00000000..94832c38
--- /dev/null
+++ b/roles/kubernetes/kubeadm/prune/tasks/net_none.yml
@@ -0,0 +1,2 @@
+---
+## nothing to do here
diff --git a/roles/kubernetes/kubeadm/reset/handlers/main.yml b/roles/kubernetes/kubeadm/reset/handlers/main.yml
new file mode 100644
index 00000000..bb7fde2b
--- /dev/null
+++ b/roles/kubernetes/kubeadm/reset/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+- name: reload systemd
+  systemd:
+    daemon_reload: yes
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index c35e2bfc..8a21fbd5 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -6,9 +6,15 @@
   loop:
     - /etc/kubernetes/kubeadm.config
     - /etc/kubernetes/kubeadm-init.log
+    - /etc/kubernetes/kubeadm-init.errors
     - /etc/kubernetes/kubeadm-join.log
+    - /etc/kubernetes/kubeadm-join.errors
     - /etc/kubernetes/pki
     - /etc/kubernetes/encryption
+    - /etc/kubernetes/network-plugin.yml
+    - /etc/kubernetes/node-local-dns.yml
+    - /etc/kubernetes/addons
+    - /etc/default/kubelet
   file:
     path: "{{ item }}"
     state: absent
@@ -25,3 +31,6 @@
   file:
     path: "{{ item.path }}"
     state: absent
+
+- name: extra-cleanup for kubeguard network plugin
+  import_tasks: net_kubeguard.yml
diff --git a/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..bcb48960
--- /dev/null
+++ b/roles/kubernetes/kubeadm/reset/tasks/net_kubeguard.yml
@@ -0,0 +1,26 @@
+---
+- name: check if kubeguard interface service unit exists
+  stat:
+    path: /etc/systemd/system/kubeguard-interface.service
+  register: kubeguard_interface_unit
+
+- name: bring down kubeguard interface
+  when: kubeguard_interface_unit.stat.exists
+  systemd:
+    name: kubeguard-interface.service
+    state: stopped
+
+- name: gather list of all kubeguard related service units
+  find:
+    path: /etc/systemd/system/
+    patterns:
+      - "kubeguard-peer-*.service"
+      - kubeguard-interface.service
+  register: kubeguard_units_installed
+
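+## removes the unit files found above plus /var/lib/kubeguard
+## (wireguard key material and the ifupdown script)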
+- name: remove all kubeguard related files and directories
+  loop: "{{ kubeguard_units_installed.files | map(attribute='path') | list | flatten | union(['/var/lib/kubeguard']) }}"
+  file:
+    path: "{{ item }}"
+    state: absent
+  notify: reload systemd