From aef8d86e4b1fc2970e27e578a6dd92ae3024f933 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Thu, 24 May 2018 23:53:00 +0200
Subject: move kubernetes roles to subdir

---
 playbooks/k8s-emc.yml                              |  12 +--
 roles/kubernetes-base/files/kubernetes-apt-key.asc |  18 ----
 roles/kubernetes-base/handlers/main.yml            |   5 -
 roles/kubernetes-base/meta/main.yml                |   3 -
 roles/kubernetes-base/tasks/main.yml               |  90 ----------------
 roles/kubernetes-base/templates/20-dns.conf.j2     |   2 -
 roles/kubernetes-base/templates/50-extra.conf.j2   |   3 -
 roles/kubernetes-master/tasks/main.yml             |  98 ------------------
 roles/kubernetes-net/files/daemon.json             |   4 -
 .../files/kubenet-interfaces.service               |  12 ---
 roles/kubernetes-net/filter_plugins/kubenet.py     |  33 ------
 roles/kubernetes-net/handlers/main.yml             |   4 -
 roles/kubernetes-net/meta/main.yml                 |   4 -
 roles/kubernetes-net/tasks/add.yml                 | 114 ---------------------
 roles/kubernetes-net/tasks/main.yml                |   8 --
 roles/kubernetes-net/tasks/remove.yml              |  28 -----
 roles/kubernetes-net/templates/ifupdown.sh.j2      |  55 ----------
 roles/kubernetes-net/templates/k8s.json.j2         |  12 ---
 .../templates/kubenet-peer.service.j2              |  36 -------
 roles/kubernetes-node/tasks/add.yml                |  18 ----
 roles/kubernetes-node/tasks/main.yml               |   8 --
 roles/kubernetes-node/tasks/remove.yml             |   3 -
 roles/kubernetes/base/files/kubernetes-apt-key.asc |  18 ++++
 roles/kubernetes/base/handlers/main.yml            |   5 +
 roles/kubernetes/base/meta/main.yml                |   3 +
 roles/kubernetes/base/tasks/main.yml               |  90 ++++++++++++++++
 roles/kubernetes/base/templates/20-dns.conf.j2     |   2 +
 roles/kubernetes/base/templates/50-extra.conf.j2   |   3 +
 roles/kubernetes/master/tasks/main.yml             |  98 ++++++++++++++++++
 roles/kubernetes/net/files/daemon.json             |   4 +
 .../net/files/kubenet-interfaces.service           |  12 +++
 roles/kubernetes/net/filter_plugins/kubenet.py     |  33 ++++++
 roles/kubernetes/net/handlers/main.yml             |   4 +
 roles/kubernetes/net/meta/main.yml                 |   4 +
 roles/kubernetes/net/tasks/add.yml                 | 114 +++++++++++++++++++++
 roles/kubernetes/net/tasks/main.yml                |   8 ++
 roles/kubernetes/net/tasks/remove.yml              |  28 +++++
 roles/kubernetes/net/templates/ifupdown.sh.j2      |  55 ++++++++++
 roles/kubernetes/net/templates/k8s.json.j2         |  12 +++
 .../net/templates/kubenet-peer.service.j2          |  36 +++++++
 roles/kubernetes/node/tasks/add.yml                |  18 ++++
 roles/kubernetes/node/tasks/main.yml               |   8 ++
 roles/kubernetes/node/tasks/remove.yml             |   3 +
 43 files changed, 564 insertions(+), 564 deletions(-)
 delete mode 100644 roles/kubernetes-base/files/kubernetes-apt-key.asc
 delete mode 100644 roles/kubernetes-base/handlers/main.yml
 delete mode 100644 roles/kubernetes-base/meta/main.yml
 delete mode 100644 roles/kubernetes-base/tasks/main.yml
 delete mode 100644 roles/kubernetes-base/templates/20-dns.conf.j2
 delete mode 100644 roles/kubernetes-base/templates/50-extra.conf.j2
 delete mode 100644 roles/kubernetes-master/tasks/main.yml
 delete mode 100644 roles/kubernetes-net/files/daemon.json
 delete mode 100644 roles/kubernetes-net/files/kubenet-interfaces.service
 delete mode 100644 roles/kubernetes-net/filter_plugins/kubenet.py
 delete mode 100644 roles/kubernetes-net/handlers/main.yml
 delete mode 100644 roles/kubernetes-net/meta/main.yml
 delete mode 100644 roles/kubernetes-net/tasks/add.yml
 delete mode 100644 roles/kubernetes-net/tasks/main.yml
 delete mode 100644 roles/kubernetes-net/tasks/remove.yml
 delete mode 100644 roles/kubernetes-net/templates/ifupdown.sh.j2
 delete mode 100644 roles/kubernetes-net/templates/k8s.json.j2
 delete mode 100644 roles/kubernetes-net/templates/kubenet-peer.service.j2
 delete mode 100644 roles/kubernetes-node/tasks/add.yml
 delete mode 100644 roles/kubernetes-node/tasks/main.yml
 delete mode 100644 roles/kubernetes-node/tasks/remove.yml
 create mode 100644 roles/kubernetes/base/files/kubernetes-apt-key.asc
 create mode 100644 roles/kubernetes/base/handlers/main.yml
 create mode 100644 roles/kubernetes/base/meta/main.yml
 create mode 100644 roles/kubernetes/base/tasks/main.yml
 create mode 100644 roles/kubernetes/base/templates/20-dns.conf.j2
 create mode 100644 roles/kubernetes/base/templates/50-extra.conf.j2
 create mode 100644 roles/kubernetes/master/tasks/main.yml
 create mode 100644 roles/kubernetes/net/files/daemon.json
 create mode 100644 roles/kubernetes/net/files/kubenet-interfaces.service
 create mode 100644 roles/kubernetes/net/filter_plugins/kubenet.py
 create mode 100644 roles/kubernetes/net/handlers/main.yml
 create mode 100644 roles/kubernetes/net/meta/main.yml
 create mode 100644 roles/kubernetes/net/tasks/add.yml
 create mode 100644 roles/kubernetes/net/tasks/main.yml
 create mode 100644 roles/kubernetes/net/tasks/remove.yml
 create mode 100644 roles/kubernetes/net/templates/ifupdown.sh.j2
 create mode 100644 roles/kubernetes/net/templates/k8s.json.j2
 create mode 100644 roles/kubernetes/net/templates/kubenet-peer.service.j2
 create mode 100644 roles/kubernetes/node/tasks/add.yml
 create mode 100644 roles/kubernetes/node/tasks/main.yml
 create mode 100644 roles/kubernetes/node/tasks/remove.yml

diff --git a/playbooks/k8s-emc.yml b/playbooks/k8s-emc.yml
index b47ed5fc..b6f09808 100644
--- a/playbooks/k8s-emc.yml
+++ b/playbooks/k8s-emc.yml
@@ -34,18 +34,18 @@
 ## upon first startup (the first time this playbook runs on a specific host).
 ## Since it is a tedious task to remove the interface and the firewall rules it is much
 ## easier to just run `net` before `base` as `net` does not need anything from `base`.
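For orientation before the hunk below: a minimal sketch of the first play as it reads after this change. The play name and hosts pattern are assumptions, since the hunk context does not show the play header; only the role order and the `##` rationale above come from the patch itself.

    - name: configure kubernetes network and base   # hypothetical play name
      hosts: k8s-emc                                # assumed hosts pattern
      roles:
        # net must run before base: base pulls in the docker role (see its
        # meta/main.yml below), and docker snapshots interfaces and firewall
        # rules on its first start, as the comment above explains
        - role: kubernetes/net
        - role: kubernetes/base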
- - role: kubernetes-net - - role: kubernetes-base + - role: kubernetes/net + - role: kubernetes/base - name: configure kubernetes master hosts: k8s-emc-master roles: - - role: kubernetes-master + - role: kubernetes/master - name: configure kubernetes nodes hosts: k8s-emc:!k8s-emc-master roles: - - role: kubernetes-node + - role: kubernetes/node ######## - name: check for nodes to be removed @@ -74,8 +74,8 @@ vars: k8s_remove_node: yes roles: - - role: kubernetes-node - - role: kubernetes-net + - role: kubernetes/node + - role: kubernetes/net - name: remove node from api server hosts: k8s-emc-master diff --git a/roles/kubernetes-base/files/kubernetes-apt-key.asc b/roles/kubernetes-base/files/kubernetes-apt-key.asc deleted file mode 100644 index 10af13ea..00000000 --- a/roles/kubernetes-base/files/kubernetes-apt-key.asc +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQENBFrBaNsBCADrF18KCbsZlo4NjAvVecTBCnp6WcBQJ5oSh7+E98jX9YznUCrN -rgmeCcCMUvTDRDxfTaDJybaHugfba43nqhkbNpJ47YXsIa+YL6eEE9emSmQtjrSW -IiY+2YJYwsDgsgckF3duqkb02OdBQlh6IbHPoXB6H//b1PgZYsomB+841XW1LSJP -YlYbIrWfwDfQvtkFQI90r6NknVTQlpqQh5GLNWNYqRNrGQPmsB+NrUYrkl1nUt1L -RGu+rCe4bSaSmNbwKMQKkROE4kTiB72DPk7zH4Lm0uo0YFFWG4qsMIuqEihJ/9KN -X8GYBr+tWgyLooLlsdK3l+4dVqd8cjkJM1ExABEBAAG0QEdvb2dsZSBDbG91ZCBQ -YWNrYWdlcyBBdXRvbWF0aWMgU2lnbmluZyBLZXkgPGdjLXRlYW1AZ29vZ2xlLmNv -bT6JAT4EEwECACgFAlrBaNsCGy8FCQWjmoAGCwkIBwMCBhUIAgkKCwQWAgMBAh4B -AheAAAoJEGoDCyG6B/T78e8H/1WH2LN/nVNhm5TS1VYJG8B+IW8zS4BqyozxC9iJ -AJqZIVHXl8g8a/Hus8RfXR7cnYHcg8sjSaJfQhqO9RbKnffiuQgGrqwQxuC2jBa6 -M/QKzejTeP0Mgi67pyrLJNWrFI71RhritQZmzTZ2PoWxfv6b+Tv5v0rPaG+ut1J4 -7pn+kYgtUaKdsJz1umi6HzK6AacDf0C0CksJdKG7MOWsZcB4xeOxJYuy6NuO6Kcd -Ez8/XyEUjIuIOlhYTd0hH8E/SEBbXXft7/VBQC5wNq40izPi+6WFK/e1O42DIpzQ -749ogYQ1eodexPNhLzekKR3XhGrNXJ95r5KO10VrsLFNd8I= -=TKuP ------END PGP PUBLIC KEY BLOCK----- diff --git a/roles/kubernetes-base/handlers/main.yml b/roles/kubernetes-base/handlers/main.yml deleted file mode 100644 index b61c1417..00000000 --- a/roles/kubernetes-base/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- name: reload systemd - command: systemctl daemon-reload - args: - warn: no diff --git a/roles/kubernetes-base/meta/main.yml b/roles/kubernetes-base/meta/main.yml deleted file mode 100644 index 724b20f1..00000000 --- a/roles/kubernetes-base/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: -- role: docker diff --git a/roles/kubernetes-base/tasks/main.yml b/roles/kubernetes-base/tasks/main.yml deleted file mode 100644 index cc3bc83a..00000000 --- a/roles/kubernetes-base/tasks/main.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -- name: prepare /var/lib/kubelet as LVM - when: kubelet_lvm is defined - block: - - - name: create logical volume - lvol: - vg: "{{ kubelet_lvm.vg }}" - lv: "{{ kubelet_lvm.lv }}" - size: "{{ kubelet_lvm.size }}" - - - name: create filesystem - filesystem: - fstype: "{{ kubelet_lvm.fs }}" - dev: "/dev/mapper/{{ kubelet_lvm.vg | replace('-', '--') }}-{{ kubelet_lvm.lv | replace('-', '--') }}" - - - name: mount filesytem - mount: - src: "/dev/mapper/{{ kubelet_lvm.vg | replace('-', '--') }}-{{ kubelet_lvm.lv | replace('-', '--') }}" - path: /var/lib/kubelet - fstype: "{{ kubelet_lvm.fs }}" - state: mounted - -- name: install apt https transport - apt: - name: apt-transport-https - state: present - force: yes - -- name: add kubernetes apt key - apt_key: - data: "{{ lookup('file', 'kubernetes-apt-key.asc') }}" - state: present - -- name: add kubernetes apt repo - apt_repository: - repo: deb http://apt.kubernetes.io/ 
kubernetes-xenial main - state: present - filename: kubernetes - -- name: install basic kubernetes components - with_items: - - "kubelet{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}" - - "kubeadm{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}" - - "kubectl{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}" - apt: - name: "{{ item }}" - state: present - -- name: disable automatic upgrades for kubernetes components - when: kubernetes.pkg_version is defined - with_items: - - kubelet - - kubeadm - - kubectl - dpkg_selections: - name: "{{ item }}" - selection: hold - -- name: install kubelet config snippets - with_items: - - 20-dns.conf - - 50-extra.conf - template: - src: "{{ item }}.j2" - dest: "/etc/systemd/system/kubelet.service.d/{{ item }}" - notify: reload systemd - -- name: add dummy group with gid 998 - group: - name: app - gid: 998 - -- name: add dummy user with uid 998 - user: - name: app - uid: 998 - group: app - password: "!" - -- name: add kubectl config for shells - with_items: - - zsh - - bash - blockinfile: - path: "/root/.{{ item }}rc" - create: yes - marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###" - content: | - source <(kubectl completion {{ item }}) diff --git a/roles/kubernetes-base/templates/20-dns.conf.j2 b/roles/kubernetes-base/templates/20-dns.conf.j2 deleted file mode 100644 index 9b7ab32c..00000000 --- a/roles/kubernetes-base/templates/20-dns.conf.j2 +++ /dev/null @@ -1,2 +0,0 @@ -[Service] -Environment="KUBELET_DNS_ARGS=--cluster-dns={{ kubernetes.service_ip_range | ipaddr(10) | ipaddr('address') }} --cluster-domain=cluster.local" diff --git a/roles/kubernetes-base/templates/50-extra.conf.j2 b/roles/kubernetes-base/templates/50-extra.conf.j2 deleted file mode 100644 index c722ade6..00000000 --- a/roles/kubernetes-base/templates/50-extra.conf.j2 +++ /dev/null @@ -1,3 +0,0 @@ -{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) -%} -[Service] -Environment="KUBELET_EXTRA_ARGS=--node-ip={{ br_net | ipaddr(1) | ipaddr('address') }}" diff --git a/roles/kubernetes-master/tasks/main.yml b/roles/kubernetes-master/tasks/main.yml deleted file mode 100644 index 43c21cae..00000000 --- a/roles/kubernetes-master/tasks/main.yml +++ /dev/null @@ -1,98 +0,0 @@ ---- -- name: check if kubeconfig admin.conf already exists - stat: - path: /etc/kubernetes/admin.conf - register: kubeconfig_admin_stats - -### cluster not yet initialized - -- name: create new cluster - when: kubeconfig_admin_stats.stat.exists == False - block: - - - name: generate bootstrap token for new cluster - command: kubeadm token generate - changed_when: False - check_mode: no - register: kubeadm_token_generate - - - name: set up kubernetes master - command: "kubeadm init --pod-network-cidr {{ kubernetes.pod_ip_range }} --service-cidr {{ kubernetes.service_ip_range }} --apiserver-advertise-address {{ kubernetes.api_advertise_ip | default('0.0.0.0') }} {% if kubernetes.api_extra_sans | length > 0 %}--apiserver-cert-extra-sans {{ kubernetes.api_extra_sans | join(',') }}{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" - args: - creates: /etc/kubernetes/pki/ca.crt - register: kubeadm_init - - - name: dump output of kubeadm init to log file - when: kubeadm_init.changed - copy: - content: "{{ kubeadm_init.stdout }}\n" - dest: /etc/kubernetes/kubeadm-init.log - -### cluster is already initialized - 
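Both the fresh-cluster path above and the existing-cluster path below mint bootstrap tokens with a 42-minute TTL, so node joins must happen within the same playbook run. As a hedged sketch, a node later consumes these facts roughly like this (the real task lives in roles/kubernetes/node/tasks/add.yml further down; the API server address is a placeholder):

    - name: join kubernetes node                    # sketch of the consumer side
      command: "kubeadm join --token {{ kube_bootstrap_token }} 192.0.2.10:6443 --discovery-token-ca-cert-hash {{ kube_bootstrap_ca_cert_hash }}"
      args:
        creates: /etc/kubernetes/kubelet.conf      # keeps re-runs from joining twice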
-- name: prepare cluster for new nodes - when: kubeconfig_admin_stats.stat.exists == True - block: - - - name: fetch list of current nodes - command: kubectl get nodes -o name - changed_when: False - check_mode: no - register: kubectl_node_list - - - name: save list of current nodes - set_fact: - kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'nodes/', '') | list }}" - - - name: create bootstrap token for existing cluster - when: kubernetes_nodes | difference(kubernetes_current_nodes) | length > 0 - command: kubeadm token create --ttl 42m - check_mode: no - register: kubeadm_token_create - -## - -- name: check if master is tainted (1/2) - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json" - check_mode: no - register: kubectl_get_node - changed_when: False - -- name: check if master is tainted (2/2) - set_fact: - kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}" - -- name: remove taint from master node - when: "kubernetes.dedicated_master == False and 'node-role.kubernetes.io/master' in kube_node_taints" - command: kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master- - -- name: add taint for master node - when: "kubernetes.dedicated_master == True and 'node-role.kubernetes.io/master' not in kube_node_taints" - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ ansible_nodename }} node-role.kubernetes.io/master='':NoSchedule" - -- name: install openssl - apt: - name: openssl - state: present - -- name: get ca certificate digest - shell: "openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'" - check_mode: no - register: kube_ca_openssl - changed_when: False - -- name: set variables needed kubernetes-nodes to join the cluster - set_fact: - kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}" - kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}" - -- name: prepare kubectl (1/2) - file: - name: /root/.kube - state: directory - -- name: prepare kubectl (2/2) - file: - dest: /root/.kube/config - src: /etc/kubernetes/admin.conf - state: link diff --git a/roles/kubernetes-net/files/daemon.json b/roles/kubernetes-net/files/daemon.json deleted file mode 100644 index 28001640..00000000 --- a/roles/kubernetes-net/files/daemon.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "bridge": "none", - "iptables": false -} diff --git a/roles/kubernetes-net/files/kubenet-interfaces.service b/roles/kubernetes-net/files/kubenet-interfaces.service deleted file mode 100644 index f27fb85b..00000000 --- a/roles/kubernetes-net/files/kubenet-interfaces.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Kubernetes Network Interfaces -After=network.target - -[Service] -Type=oneshot -ExecStart=/var/lib/kubenet/ifupdown.sh up -ExecStop=/var/lib/kubenet/ifupdown.sh down -RemainAfterExit=yes - -[Install] -WantedBy=multi-user.target diff --git a/roles/kubernetes-net/filter_plugins/kubenet.py b/roles/kubernetes-net/filter_plugins/kubenet.py deleted file mode 100644 index c1312dd8..00000000 --- a/roles/kubernetes-net/filter_plugins/kubenet.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import 
(absolute_import, division, print_function) -__metaclass__ = type - -from ansible import errors - - -def direct_net_zone(data, myname, peer): - try: - zones = [] - for zone in data: - if myname in data[zone]['node_interface'] and peer in data[zone]['node_interface']: - zones.append(zone) - - if not zones: - return "" - if len(zones) > 1: - raise errors.AnsibleFilterError("host '%s' and '%s' have multiple direct net zones in common: %s" % - (myname, peer, zones.join(','))) - return zones[0] - - except Exception as e: - raise errors.AnsibleFilterError("direct_net_zones(): %s" % str(e)) - - -class FilterModule(object): - - ''' Kubernetes Network Filters ''' - filter_map = { - 'direct_net_zone': direct_net_zone, - } - - def filters(self): - return self.filter_map diff --git a/roles/kubernetes-net/handlers/main.yml b/roles/kubernetes-net/handlers/main.yml deleted file mode 100644 index bb7fde2b..00000000 --- a/roles/kubernetes-net/handlers/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- name: reload systemd - systemd: - daemon_reload: yes diff --git a/roles/kubernetes-net/meta/main.yml b/roles/kubernetes-net/meta/main.yml deleted file mode 100644 index 03dfcb23..00000000 --- a/roles/kubernetes-net/meta/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -dependencies: -- role: wireguard - when: k8s_remove_node is not defined diff --git a/roles/kubernetes-net/tasks/add.yml b/roles/kubernetes-net/tasks/add.yml deleted file mode 100644 index f4e422c6..00000000 --- a/roles/kubernetes-net/tasks/add.yml +++ /dev/null @@ -1,114 +0,0 @@ ---- -- name: create docker config directory - file: - name: /etc/docker - state: directory - mode: 0700 - -- name: disable docker iptables and bridge - copy: - src: daemon.json - dest: /etc/docker/daemon.json - -- name: create network config directory - file: - name: /var/lib/kubenet/ - state: directory - -- name: configure wireguard port - set_fact: - kubenet_wireguard_port: "{{ kubernetes.wireguard_port | default(51820) }}" - -- name: install ifupdown script - template: - src: ifupdown.sh.j2 - dest: /var/lib/kubenet/ifupdown.sh - mode: 0755 - # TODO: notify reload... this is unfortunately already to late because - # it must probably be brought down by the old version of the script - -- name: generate wireguard private key - shell: "umask 077; wg genkey > /var/lib/kubenet/kube-wg0.privatekey" - args: - creates: /var/lib/kubenet/kube-wg0.privatekey - -- name: fetch wireguard public key - shell: "wg pubkey < /var/lib/kubenet/kube-wg0.privatekey" - register: kubenet_wireguard_pubkey - changed_when: false - check_mode: no - -- name: install systemd service unit for network interfaces - copy: - src: kubenet-interfaces.service - dest: /etc/systemd/system/kubenet-interfaces.service - # TODO: notify: reload??? 
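The direct_net_zone filter above looks up a zone whose node_interface map contains both hosts. A sketch of the data it expects, with hypothetical zone, host, and interface names (transfer_net is the key the kubenet-peer template reads later in this patch):

    kubernetes:
      direct_net_zones:
        zone1:                         # hypothetical zone name
          transfer_net: 192.168.242.0/24
          node_interface:
            node1: eth1
            node2: eth1

In a template, {{ kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) }} then yields 'zone1' when both hosts share that zone, an empty string when they share none, and raises an error when they share more than one.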
- -- name: make sure kubenet interfaces service is started and enabled - systemd: - daemon_reload: yes - name: kubenet-interfaces.service - state: started - enabled: yes - -- name: get list of currently installed kubenet peers - find: - path: /etc/systemd/system/ - pattern: "kubenet-peer-*.service" - register: kubenet_peers_installed - -- name: compute list of peers to be added - set_fact: - kubenet_peers_to_add: "{{ kubernetes_nodes | difference(inventory_hostname) }}" - -- name: compute list of peers to be removed - set_fact: - kubenet_peers_to_remove: "{{ kubenet_peers_installed.files | map(attribute='path') | map('replace', '/etc/systemd/system/kubenet-peer-', '') | map('replace', '.service', '') | difference(kubenet_peers_to_add) }}" - -- name: stop/disable systemd units for stale kubenet peers - with_items: "{{ kubenet_peers_to_remove }}" - systemd: - name: "kubenet-peer-{{ item }}.service" - state: stopped - enabled: no - -- name: remove systemd units for stale kubenet peers - with_items: "{{ kubenet_peers_to_remove }}" - file: - name: "/etc/systemd/system/kubenet-peer-{{ item }}.service" - state: absent - -- name: install systemd units for every kubenet peer - with_items: "{{ kubenet_peers_to_add }}" - loop_control: - loop_var: peer - template: - src: kubenet-peer.service.j2 - dest: "/etc/systemd/system/kubenet-peer-{{ peer }}.service" - # TODO: notify restart for peers that change... - -- name: make sure kubenet peer services are started and enabled - with_items: "{{ kubenet_peers_to_add }}" - systemd: - daemon_reload: yes - name: "kubenet-peer-{{ item }}.service" - state: started - enabled: yes - -- name: enable IPv4 forwarding - sysctl: - name: net.ipv4.ip_forward - value: 1 - sysctl_set: yes - state: present - reload: yes - -- name: create cni config directory - file: - name: /etc/cni/net.d - state: directory - -- name: install cni config - template: - src: k8s.json.j2 - dest: /etc/cni/net.d/k8s.json diff --git a/roles/kubernetes-net/tasks/main.yml b/roles/kubernetes-net/tasks/main.yml deleted file mode 100644 index 8c94292e..00000000 --- a/roles/kubernetes-net/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: add node to overlay network - include_tasks: add.yml - when: k8s_remove_node is not defined - -- name: remove node from overlay network - include_tasks: remove.yml - when: k8s_remove_node is defined diff --git a/roles/kubernetes-net/tasks/remove.yml b/roles/kubernetes-net/tasks/remove.yml deleted file mode 100644 index 6695bd5d..00000000 --- a/roles/kubernetes-net/tasks/remove.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: check if kubenet interface service unit exists - stat: - path: /etc/systemd/system/kubenet-interfaces.service - register: kubenet_interface_unit - -- name: bring down kubenet interface - systemd: - name: kubenet-interfaces.service - state: stopped - when: kubenet_interface_unit.stat.exists - -- name: gather list of all kubenet related service units - find: - path: /etc/systemd/system/ - patterns: - - "kubenet-peer-*.service" - - kubenet-interfaces.service - register: kubenet_units_installed - -- name: remove all kubenet related files and directories - with_flattened: - - "{{ kubenet_units_installed.files | map(attribute='path') | list }}" - - /var/lib/kubenet - file: - path: "{{ item }}" - state: absent - notify: reload systemd diff --git a/roles/kubernetes-net/templates/ifupdown.sh.j2 b/roles/kubernetes-net/templates/ifupdown.sh.j2 deleted file mode 100644 index 995d358b..00000000 --- a/roles/kubernetes-net/templates/ifupdown.sh.j2 +++ 
/dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -set -e - -CONF_D="/var/lib/kubenet/" - -INET_IF="{{ ansible_default_ipv4.interface }}" - -POD_NET_CIDR="{{ kubernetes.pod_ip_range }}" - -{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) -%} -BR_IF="kube-br0" -BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}" -BR_IP_CIDR="{{ br_net | ipaddr(1) }}" -BR_NET_CIDR="{{ br_net }}" - -TUN_IF="kube-wg0" -TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[inventory_hostname]) }}" - - -case "$1" in - up) - # bring up bridge for local pods - ip link add dev "$BR_IF" type bridge - ip addr add dev "$BR_IF" "$BR_IP_CIDR" - ip link set up dev "$BR_IF" - iptables -t nat -A POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE - modprobe br_netfilter - - # bring up wireguard tunnel to other nodes - ip link add dev "$TUN_IF" type wireguard - ip addr add dev "$TUN_IF" "$TUN_IP_CIDR" - wg set "$TUN_IF" listen-port {{ kubenet_wireguard_port }} private-key "$CONF_D/$TUN_IF.privatekey" - ip link set up dev "$TUN_IF" - - # make pods and service IPs reachable - # !!! use IP of bridge as source so we don't produce martians if direct-zones are involved!!! - ip route add "$POD_NET_CIDR" dev "$TUN_IF" src "$BR_IP" - ;; - down) - # bring down wireguard tunnel to other nodes - ip route del "$POD_NET_CIDR" dev "$TUN_IF" - ip link del dev "$TUN_IF" - - # bring down bridge for local pods - iptables -t nat -D POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE - ip link del dev "$BR_IF" - ;; - *) - echo "usage: $0 (up|down)" - exit 1 - ;; -esac - -exit 0 diff --git a/roles/kubernetes-net/templates/k8s.json.j2 b/roles/kubernetes-net/templates/k8s.json.j2 deleted file mode 100644 index f457ed1c..00000000 --- a/roles/kubernetes-net/templates/k8s.json.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cniVersion": "0.3.1", - "name": "k8s", - "type": "bridge", - "bridge": "kube-br0", - "isDefaultGateway": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) }}" - } -} diff --git a/roles/kubernetes-net/templates/kubenet-peer.service.j2 b/roles/kubernetes-net/templates/kubenet-peer.service.j2 deleted file mode 100644 index bee211af..00000000 --- a/roles/kubernetes-net/templates/kubenet-peer.service.j2 +++ /dev/null @@ -1,36 +0,0 @@ -[Unit] -Description=Kubernetes Network Peer {{ peer }} -After=network.target -Requires=kubenet-interfaces.service -After=kubenet-interfaces.service - -{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[peer]) -%} -{% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%} -{% if direct_zone %} -{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[inventory_hostname]) %} -{% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %} -{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[peer]) %} -{% else %} -{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[peer]) -%} -{% set wg_pubkey = hostvars[peer].kubenet_wireguard_pubkey.stdout -%} -{% set wg_host = hostvars[peer].external_ip | default(hostvars[peer].ansible_default_ipv4.address) -%} 
-{% set wg_port = hostvars[peer].kubenet_wireguard_port -%} -{% set wg_allowedips = (tun_ip | ipaddr('address')) + "/32," + pod_net_peer %} -{% endif %} -[Service] -Type=oneshot -{% if direct_zone %} -ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }} -ExecStart=/sbin/ip link set up dev {{ direct_interface }} -ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} -ExecStop=/sbin/ip route del {{ pod_net_peer }} -ExecStop=/sbin/ip link set down dev {{ direct_interface }} -ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }} -{% else %} -ExecStart=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10 -ExecStop=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} remove -{% endif %} -RemainAfterExit=yes - -[Install] -WantedBy=multi-user.target diff --git a/roles/kubernetes-node/tasks/add.yml b/roles/kubernetes-node/tasks/add.yml deleted file mode 100644 index dd784b35..00000000 --- a/roles/kubernetes-node/tasks/add.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: get master vars - set_fact: - kube_bootstrap_token: "{{ hostvars[kubernetes_nodes_master].kube_bootstrap_token }}" - kube_bootstrap_ca_cert_hash: "{{ hostvars[kubernetes_nodes_master].kube_bootstrap_ca_cert_hash }}" - kube_master_addr: "{{ kubernetes.api_advertise_ip | default(hostvars[kubernetes_nodes_master].ansible_default_ipv4.address) }}" - -- name: join kubernetes node - command: "kubeadm join --token {{ kube_bootstrap_token }} {{ kube_master_addr }}:6443 --discovery-token-ca-cert-hash {{ kube_bootstrap_ca_cert_hash }}" - args: - creates: /etc/kubernetes/kubelet.conf - register: kubeadm_join - -- name: dump output of kubeadm join to log file - when: kubeadm_join.changed - copy: - content: "{{ kubeadm_join.stdout }}\n" - dest: /etc/kubernetes/kubeadm-join.log diff --git a/roles/kubernetes-node/tasks/main.yml b/roles/kubernetes-node/tasks/main.yml deleted file mode 100644 index d078d2fb..00000000 --- a/roles/kubernetes-node/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: add node cluster - include_tasks: add.yml - when: k8s_remove_node is not defined - -- name: remove node from cluster - include_tasks: remove.yml - when: k8s_remove_node is defined diff --git a/roles/kubernetes-node/tasks/remove.yml b/roles/kubernetes-node/tasks/remove.yml deleted file mode 100644 index 95787629..00000000 --- a/roles/kubernetes-node/tasks/remove.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: clean up settings and files created by kubeadm - command: kubeadm reset diff --git a/roles/kubernetes/base/files/kubernetes-apt-key.asc b/roles/kubernetes/base/files/kubernetes-apt-key.asc new file mode 100644 index 00000000..10af13ea --- /dev/null +++ b/roles/kubernetes/base/files/kubernetes-apt-key.asc @@ -0,0 +1,18 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFrBaNsBCADrF18KCbsZlo4NjAvVecTBCnp6WcBQJ5oSh7+E98jX9YznUCrN +rgmeCcCMUvTDRDxfTaDJybaHugfba43nqhkbNpJ47YXsIa+YL6eEE9emSmQtjrSW +IiY+2YJYwsDgsgckF3duqkb02OdBQlh6IbHPoXB6H//b1PgZYsomB+841XW1LSJP +YlYbIrWfwDfQvtkFQI90r6NknVTQlpqQh5GLNWNYqRNrGQPmsB+NrUYrkl1nUt1L +RGu+rCe4bSaSmNbwKMQKkROE4kTiB72DPk7zH4Lm0uo0YFFWG4qsMIuqEihJ/9KN +X8GYBr+tWgyLooLlsdK3l+4dVqd8cjkJM1ExABEBAAG0QEdvb2dsZSBDbG91ZCBQ +YWNrYWdlcyBBdXRvbWF0aWMgU2lnbmluZyBLZXkgPGdjLXRlYW1AZ29vZ2xlLmNv +bT6JAT4EEwECACgFAlrBaNsCGy8FCQWjmoAGCwkIBwMCBhUIAgkKCwQWAgMBAh4B +AheAAAoJEGoDCyG6B/T78e8H/1WH2LN/nVNhm5TS1VYJG8B+IW8zS4BqyozxC9iJ 
+AJqZIVHXl8g8a/Hus8RfXR7cnYHcg8sjSaJfQhqO9RbKnffiuQgGrqwQxuC2jBa6 +M/QKzejTeP0Mgi67pyrLJNWrFI71RhritQZmzTZ2PoWxfv6b+Tv5v0rPaG+ut1J4 +7pn+kYgtUaKdsJz1umi6HzK6AacDf0C0CksJdKG7MOWsZcB4xeOxJYuy6NuO6Kcd +Ez8/XyEUjIuIOlhYTd0hH8E/SEBbXXft7/VBQC5wNq40izPi+6WFK/e1O42DIpzQ +749ogYQ1eodexPNhLzekKR3XhGrNXJ95r5KO10VrsLFNd8I= +=TKuP +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/kubernetes/base/handlers/main.yml b/roles/kubernetes/base/handlers/main.yml new file mode 100644 index 00000000..b61c1417 --- /dev/null +++ b/roles/kubernetes/base/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: reload systemd + command: systemctl daemon-reload + args: + warn: no diff --git a/roles/kubernetes/base/meta/main.yml b/roles/kubernetes/base/meta/main.yml new file mode 100644 index 00000000..724b20f1 --- /dev/null +++ b/roles/kubernetes/base/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- role: docker diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml new file mode 100644 index 00000000..cc3bc83a --- /dev/null +++ b/roles/kubernetes/base/tasks/main.yml @@ -0,0 +1,90 @@ +--- +- name: prepare /var/lib/kubelet as LVM + when: kubelet_lvm is defined + block: + + - name: create logical volume + lvol: + vg: "{{ kubelet_lvm.vg }}" + lv: "{{ kubelet_lvm.lv }}" + size: "{{ kubelet_lvm.size }}" + + - name: create filesystem + filesystem: + fstype: "{{ kubelet_lvm.fs }}" + dev: "/dev/mapper/{{ kubelet_lvm.vg | replace('-', '--') }}-{{ kubelet_lvm.lv | replace('-', '--') }}" + + - name: mount filesystem + mount: + src: "/dev/mapper/{{ kubelet_lvm.vg | replace('-', '--') }}-{{ kubelet_lvm.lv | replace('-', '--') }}" + path: /var/lib/kubelet + fstype: "{{ kubelet_lvm.fs }}" + state: mounted + +- name: install apt https transport + apt: + name: apt-transport-https + state: present + force: yes + +- name: add kubernetes apt key + apt_key: + data: "{{ lookup('file', 'kubernetes-apt-key.asc') }}" + state: present + +- name: add kubernetes apt repo + apt_repository: + repo: deb http://apt.kubernetes.io/ kubernetes-xenial main + state: present + filename: kubernetes + +- name: install basic kubernetes components + with_items: + - "kubelet{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}" + - "kubeadm{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}" + - "kubectl{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}" + apt: + name: "{{ item }}" + state: present + +- name: disable automatic upgrades for kubernetes components + when: kubernetes.pkg_version is defined + with_items: + - kubelet + - kubeadm + - kubectl + dpkg_selections: + name: "{{ item }}" + selection: hold + +- name: install kubelet config snippets + with_items: + - 20-dns.conf + - 50-extra.conf + template: + src: "{{ item }}.j2" + dest: "/etc/systemd/system/kubelet.service.d/{{ item }}" + notify: reload systemd + +- name: add dummy group with gid 998 + group: + name: app + gid: 998 + +- name: add dummy user with uid 998 + user: + name: app + uid: 998 + group: app + password: "!"
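A sketch of the host variables the base tasks above consume; all concrete values are hypothetical. kubelet_lvm is optional and gates the whole LVM block, and setting kubernetes.pkg_version both pins the apt installs and triggers the dpkg hold:

    kubelet_lvm:               # optional; omit to skip the LVM setup entirely
      vg: vg0                  # hypothetical volume group
      lv: kubelet
      size: 20g
      fs: ext4
    kubernetes:
      pkg_version: 1.10.3-00   # hypothetical pin for kubelet/kubeadm/kubectl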
+ +- name: add kubectl config for shells + with_items: + - zsh + - bash + blockinfile: + path: "/root/.{{ item }}rc" + create: yes + marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###" + content: | + source <(kubectl completion {{ item }}) diff --git a/roles/kubernetes/base/templates/20-dns.conf.j2 b/roles/kubernetes/base/templates/20-dns.conf.j2 new file mode 100644 index 00000000..9b7ab32c --- /dev/null +++ b/roles/kubernetes/base/templates/20-dns.conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment="KUBELET_DNS_ARGS=--cluster-dns={{ kubernetes.service_ip_range | ipaddr(10) | ipaddr('address') }} --cluster-domain=cluster.local" diff --git a/roles/kubernetes/base/templates/50-extra.conf.j2 b/roles/kubernetes/base/templates/50-extra.conf.j2 new file mode 100644 index 00000000..c722ade6 --- /dev/null +++ b/roles/kubernetes/base/templates/50-extra.conf.j2 @@ -0,0 +1,3 @@ +{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) -%} +[Service] +Environment="KUBELET_EXTRA_ARGS=--node-ip={{ br_net | ipaddr(1) | ipaddr('address') }}" diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml new file mode 100644 index 00000000..c9092bf3 --- /dev/null +++ b/roles/kubernetes/master/tasks/main.yml @@ -0,0 +1,98 @@ +--- +- name: check if kubeconfig admin.conf already exists + stat: + path: /etc/kubernetes/admin.conf + register: kubeconfig_admin_stats + +### cluster not yet initialized + +- name: create new cluster + when: kubeconfig_admin_stats.stat.exists == False + block: + + - name: generate bootstrap token for new cluster + command: kubeadm token generate + changed_when: False + check_mode: no + register: kubeadm_token_generate + + - name: set up kubernetes master + command: "kubeadm init --pod-network-cidr {{ kubernetes.pod_ip_range }} --service-cidr {{ kubernetes.service_ip_range }} --apiserver-advertise-address {{ kubernetes.api_advertise_ip | default('0.0.0.0') }} {% if kubernetes.api_extra_sans | length > 0 %}--apiserver-cert-extra-sans {{ kubernetes.api_extra_sans | join(',') }}{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" + args: + creates: /etc/kubernetes/pki/ca.crt + register: kubeadm_init + + - name: dump output of kubeadm init to log file + when: kubeadm_init.changed + copy: + content: "{{ kubeadm_init.stdout }}\n" + dest: /etc/kubernetes/kubeadm-init.log + +### cluster is already initialized + +- name: prepare cluster for new nodes + when: kubeconfig_admin_stats.stat.exists == True + block: + + - name: fetch list of current nodes + command: kubectl get nodes -o name + changed_when: False + check_mode: no + register: kubectl_node_list + + - name: save list of current nodes + set_fact: + kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'nodes/', '') | list }}" + + - name: create bootstrap token for existing cluster + when: kubernetes_nodes | difference(kubernetes_current_nodes) | length > 0 + command: kubeadm token create --ttl 42m + check_mode: no + register: kubeadm_token_create + +## + +- name: check if master is tainted (1/2) + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json" + check_mode: no + register: kubectl_get_node + changed_when: False + +- name: check if master is tainted (2/2) + set_fact: + kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | 
map(attribute='key') | list }}{% endif %}" + +- name: remove taint from master node + when: "kubernetes.dedicated_master == False and 'node-role.kubernetes.io/master' in kube_node_taints" + command: kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master- + +- name: add taint for master node + when: "kubernetes.dedicated_master == True and 'node-role.kubernetes.io/master' not in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ ansible_nodename }} node-role.kubernetes.io/master='':NoSchedule" + +- name: install openssl + apt: + name: openssl + state: present + +- name: get ca certificate digest + shell: "openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'" + check_mode: no + register: kube_ca_openssl + changed_when: False + +- name: set variables needed by kubernetes/nodes to join the cluster + set_fact: + kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}" + kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}" + +- name: prepare kubectl (1/2) + file: + name: /root/.kube + state: directory + +- name: prepare kubectl (2/2) + file: + dest: /root/.kube/config + src: /etc/kubernetes/admin.conf + state: link diff --git a/roles/kubernetes/net/files/daemon.json b/roles/kubernetes/net/files/daemon.json new file mode 100644 index 00000000..28001640 --- /dev/null +++ b/roles/kubernetes/net/files/daemon.json @@ -0,0 +1,4 @@ +{ + "bridge": "none", + "iptables": false +} diff --git a/roles/kubernetes/net/files/kubenet-interfaces.service b/roles/kubernetes/net/files/kubenet-interfaces.service new file mode 100644 index 00000000..f27fb85b --- /dev/null +++ b/roles/kubernetes/net/files/kubenet-interfaces.service @@ -0,0 +1,12 @@ +[Unit] +Description=Kubernetes Network Interfaces +After=network.target + +[Service] +Type=oneshot +ExecStart=/var/lib/kubenet/ifupdown.sh up +ExecStop=/var/lib/kubenet/ifupdown.sh down +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/roles/kubernetes/net/filter_plugins/kubenet.py b/roles/kubernetes/net/filter_plugins/kubenet.py new file mode 100644 index 00000000..c1312dd8 --- /dev/null +++ b/roles/kubernetes/net/filter_plugins/kubenet.py @@ -0,0 +1,33 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible import errors + + +def direct_net_zone(data, myname, peer): + try: + zones = [] + for zone in data: + if myname in data[zone]['node_interface'] and peer in data[zone]['node_interface']: + zones.append(zone) + + if not zones: + return "" + if len(zones) > 1: + raise errors.AnsibleFilterError("host '%s' and '%s' have multiple direct net zones in common: %s" % + (myname, peer, ','.join(zones))) + return zones[0] + + except Exception as e: + raise errors.AnsibleFilterError("direct_net_zones(): %s" % str(e)) + + +class FilterModule(object): + + ''' Kubernetes Network Filters ''' + filter_map = { + 'direct_net_zone': direct_net_zone, + } + + def filters(self): + return self.filter_map diff --git a/roles/kubernetes/net/handlers/main.yml b/roles/kubernetes/net/handlers/main.yml new file mode 100644 index 00000000..bb7fde2b --- /dev/null +++ b/roles/kubernetes/net/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: reload systemd + systemd: + daemon_reload: yes diff --git
a/roles/kubernetes/net/meta/main.yml b/roles/kubernetes/net/meta/main.yml new file mode 100644 index 00000000..03dfcb23 --- /dev/null +++ b/roles/kubernetes/net/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: +- role: wireguard + when: k8s_remove_node is not defined diff --git a/roles/kubernetes/net/tasks/add.yml b/roles/kubernetes/net/tasks/add.yml new file mode 100644 index 00000000..f4e422c6 --- /dev/null +++ b/roles/kubernetes/net/tasks/add.yml @@ -0,0 +1,114 @@ +--- +- name: create docker config directory + file: + name: /etc/docker + state: directory + mode: 0700 + +- name: disable docker iptables and bridge + copy: + src: daemon.json + dest: /etc/docker/daemon.json + +- name: create network config directory + file: + name: /var/lib/kubenet/ + state: directory + +- name: configure wireguard port + set_fact: + kubenet_wireguard_port: "{{ kubernetes.wireguard_port | default(51820) }}" + +- name: install ifupdown script + template: + src: ifupdown.sh.j2 + dest: /var/lib/kubenet/ifupdown.sh + mode: 0755 + # TODO: notify reload... this is unfortunately already too late because + # it must probably be brought down by the old version of the script + +- name: generate wireguard private key + shell: "umask 077; wg genkey > /var/lib/kubenet/kube-wg0.privatekey" + args: + creates: /var/lib/kubenet/kube-wg0.privatekey + +- name: fetch wireguard public key + shell: "wg pubkey < /var/lib/kubenet/kube-wg0.privatekey" + register: kubenet_wireguard_pubkey + changed_when: false + check_mode: no + +- name: install systemd service unit for network interfaces + copy: + src: kubenet-interfaces.service + dest: /etc/systemd/system/kubenet-interfaces.service + # TODO: notify: reload??? + +- name: make sure kubenet interfaces service is started and enabled + systemd: + daemon_reload: yes + name: kubenet-interfaces.service + state: started + enabled: yes + +- name: get list of currently installed kubenet peers + find: + path: /etc/systemd/system/ + pattern: "kubenet-peer-*.service" + register: kubenet_peers_installed + +- name: compute list of peers to be added + set_fact: + kubenet_peers_to_add: "{{ kubernetes_nodes | difference(inventory_hostname) }}" + +- name: compute list of peers to be removed + set_fact: + kubenet_peers_to_remove: "{{ kubenet_peers_installed.files | map(attribute='path') | map('replace', '/etc/systemd/system/kubenet-peer-', '') | map('replace', '.service', '') | difference(kubenet_peers_to_add) }}" + +- name: stop/disable systemd units for stale kubenet peers + with_items: "{{ kubenet_peers_to_remove }}" + systemd: + name: "kubenet-peer-{{ item }}.service" + state: stopped + enabled: no + +- name: remove systemd units for stale kubenet peers + with_items: "{{ kubenet_peers_to_remove }}" + file: + name: "/etc/systemd/system/kubenet-peer-{{ item }}.service" + state: absent + +- name: install systemd units for every kubenet peer + with_items: "{{ kubenet_peers_to_add }}" + loop_control: + loop_var: peer + template: + src: kubenet-peer.service.j2 + dest: "/etc/systemd/system/kubenet-peer-{{ peer }}.service" + # TODO: notify restart for peers that change...
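A sketch of the inventory data the peer machinery above and the templates below rely on; all concrete values are hypothetical. net_index assigns each host the subnet slot used for its bridge and tunnel addresses, and external_ip (optional, per host) overrides the wireguard endpoint otherwise derived from ansible_default_ipv4:

    kubernetes:
      pod_ip_range: 10.32.0.0/12     # hypothetical cluster-wide pod range
      pod_ip_range_size: 24          # size of each per-node subnet
      wireguard_port: 51820          # optional; 51820 is also the default above
      net_index:
        node1: 1                     # slot 0 is reserved for the wireguard transfer net
        node2: 2
    kubernetes_nodes:
      - node1
      - node2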
+ +- name: make sure kubenet peer services are started and enabled + with_items: "{{ kubenet_peers_to_add }}" + systemd: + daemon_reload: yes + name: "kubenet-peer-{{ item }}.service" + state: started + enabled: yes + +- name: enable IPv4 forwarding + sysctl: + name: net.ipv4.ip_forward + value: 1 + sysctl_set: yes + state: present + reload: yes + +- name: create cni config directory + file: + name: /etc/cni/net.d + state: directory + +- name: install cni config + template: + src: k8s.json.j2 + dest: /etc/cni/net.d/k8s.json diff --git a/roles/kubernetes/net/tasks/main.yml b/roles/kubernetes/net/tasks/main.yml new file mode 100644 index 00000000..8c94292e --- /dev/null +++ b/roles/kubernetes/net/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: add node to overlay network + include_tasks: add.yml + when: k8s_remove_node is not defined + +- name: remove node from overlay network + include_tasks: remove.yml + when: k8s_remove_node is defined diff --git a/roles/kubernetes/net/tasks/remove.yml b/roles/kubernetes/net/tasks/remove.yml new file mode 100644 index 00000000..6695bd5d --- /dev/null +++ b/roles/kubernetes/net/tasks/remove.yml @@ -0,0 +1,28 @@ +--- +- name: check if kubenet interface service unit exists + stat: + path: /etc/systemd/system/kubenet-interfaces.service + register: kubenet_interface_unit + +- name: bring down kubenet interface + systemd: + name: kubenet-interfaces.service + state: stopped + when: kubenet_interface_unit.stat.exists + +- name: gather list of all kubenet related service units + find: + path: /etc/systemd/system/ + patterns: + - "kubenet-peer-*.service" + - kubenet-interfaces.service + register: kubenet_units_installed + +- name: remove all kubenet related files and directories + with_flattened: + - "{{ kubenet_units_installed.files | map(attribute='path') | list }}" + - /var/lib/kubenet + file: + path: "{{ item }}" + state: absent + notify: reload systemd diff --git a/roles/kubernetes/net/templates/ifupdown.sh.j2 b/roles/kubernetes/net/templates/ifupdown.sh.j2 new file mode 100644 index 00000000..995d358b --- /dev/null +++ b/roles/kubernetes/net/templates/ifupdown.sh.j2 @@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +CONF_D="/var/lib/kubenet/" + +INET_IF="{{ ansible_default_ipv4.interface }}" + +POD_NET_CIDR="{{ kubernetes.pod_ip_range }}" + +{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) -%} +BR_IF="kube-br0" +BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}" +BR_IP_CIDR="{{ br_net | ipaddr(1) }}" +BR_NET_CIDR="{{ br_net }}" + +TUN_IF="kube-wg0" +TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[inventory_hostname]) }}" + + +case "$1" in + up) + # bring up bridge for local pods + ip link add dev "$BR_IF" type bridge + ip addr add dev "$BR_IF" "$BR_IP_CIDR" + ip link set up dev "$BR_IF" + iptables -t nat -A POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE + modprobe br_netfilter + + # bring up wireguard tunnel to other nodes + ip link add dev "$TUN_IF" type wireguard + ip addr add dev "$TUN_IF" "$TUN_IP_CIDR" + wg set "$TUN_IF" listen-port {{ kubenet_wireguard_port }} private-key "$CONF_D/$TUN_IF.privatekey" + ip link set up dev "$TUN_IF" + + # make pods and service IPs reachable + # !!! use IP of bridge as source so we don't produce martians if direct-zones are involved!!! 
+ ip route add "$POD_NET_CIDR" dev "$TUN_IF" src "$BR_IP" + ;; + down) + # bring down wireguard tunnel to other nodes + ip route del "$POD_NET_CIDR" dev "$TUN_IF" + ip link del dev "$TUN_IF" + + # bring down bridge for local pods + iptables -t nat -D POSTROUTING -s "$BR_NET_CIDR" -o "$INET_IF" -j MASQUERADE + ip link del dev "$BR_IF" + ;; + *) + echo "usage: $0 (up|down)" + exit 1 + ;; +esac + +exit 0 diff --git a/roles/kubernetes/net/templates/k8s.json.j2 b/roles/kubernetes/net/templates/k8s.json.j2 new file mode 100644 index 00000000..f457ed1c --- /dev/null +++ b/roles/kubernetes/net/templates/k8s.json.j2 @@ -0,0 +1,12 @@ +{ + "cniVersion": "0.3.1", + "name": "k8s", + "type": "bridge", + "bridge": "kube-br0", + "isDefaultGateway": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) }}" + } +} diff --git a/roles/kubernetes/net/templates/kubenet-peer.service.j2 b/roles/kubernetes/net/templates/kubenet-peer.service.j2 new file mode 100644 index 00000000..bee211af --- /dev/null +++ b/roles/kubernetes/net/templates/kubenet-peer.service.j2 @@ -0,0 +1,36 @@ +[Unit] +Description=Kubernetes Network Peer {{ peer }} +After=network.target +Requires=kubenet-interfaces.service +After=kubenet-interfaces.service + +{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[peer]) -%} +{% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%} +{% if direct_zone %} +{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[inventory_hostname]) %} +{% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %} +{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[peer]) %} +{% else %} +{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[peer]) -%} +{% set wg_pubkey = hostvars[peer].kubenet_wireguard_pubkey.stdout -%} +{% set wg_host = hostvars[peer].external_ip | default(hostvars[peer].ansible_default_ipv4.address) -%} +{% set wg_port = hostvars[peer].kubenet_wireguard_port -%} +{% set wg_allowedips = (tun_ip | ipaddr('address')) + "/32," + pod_net_peer %} +{% endif %} +[Service] +Type=oneshot +{% if direct_zone %} +ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }} +ExecStart=/sbin/ip link set up dev {{ direct_interface }} +ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} +ExecStop=/sbin/ip route del {{ pod_net_peer }} +ExecStop=/sbin/ip link set down dev {{ direct_interface }} +ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }} +{% else %} +ExecStart=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} allowed-ips {{ wg_allowedips }} endpoint {{ wg_host }}:{{ wg_port }} persistent-keepalive 10 +ExecStop=/usr/bin/wg set kube-wg0 peer {{ wg_pubkey }} remove +{% endif %} +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target diff --git a/roles/kubernetes/node/tasks/add.yml b/roles/kubernetes/node/tasks/add.yml new file mode 100644 index 00000000..dd784b35 --- /dev/null +++ b/roles/kubernetes/node/tasks/add.yml @@ -0,0 +1,18 @@ +--- +- name: get master vars + set_fact: + kube_bootstrap_token: "{{ hostvars[kubernetes_nodes_master].kube_bootstrap_token }}" + kube_bootstrap_ca_cert_hash: "{{ 
hostvars[kubernetes_nodes_master].kube_bootstrap_ca_cert_hash }}" + kube_master_addr: "{{ kubernetes.api_advertise_ip | default(hostvars[kubernetes_nodes_master].ansible_default_ipv4.address) }}" + +- name: join kubernetes node + command: "kubeadm join --token {{ kube_bootstrap_token }} {{ kube_master_addr }}:6443 --discovery-token-ca-cert-hash {{ kube_bootstrap_ca_cert_hash }}" + args: + creates: /etc/kubernetes/kubelet.conf + register: kubeadm_join + +- name: dump output of kubeadm join to log file + when: kubeadm_join.changed + copy: + content: "{{ kubeadm_join.stdout }}\n" + dest: /etc/kubernetes/kubeadm-join.log diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml new file mode 100644 index 00000000..d078d2fb --- /dev/null +++ b/roles/kubernetes/node/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: add node to cluster + include_tasks: add.yml + when: k8s_remove_node is not defined + +- name: remove node from cluster + include_tasks: remove.yml + when: k8s_remove_node is defined diff --git a/roles/kubernetes/node/tasks/remove.yml b/roles/kubernetes/node/tasks/remove.yml new file mode 100644 index 00000000..95787629 --- /dev/null +++ b/roles/kubernetes/node/tasks/remove.yml @@ -0,0 +1,3 @@ +--- +- name: clean up settings and files created by kubeadm + command: kubeadm reset
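To close the loop on the k8s_remove_node switch used by the node and net roles above, a sketch of the removal play that drives it, modeled on the playbooks/k8s-emc.yml hunk at the top of this patch; the hosts pattern is hypothetical, since the playbook computes the affected hosts itself:

    - name: remove nodes from cluster and overlay network
      hosts: k8s-emc-remove          # hypothetical group; see the playbook for the real selection
      vars:
        k8s_remove_node: yes
      roles:
        - role: kubernetes/node      # runs tasks/remove.yml -> kubeadm reset
        - role: kubernetes/net       # runs tasks/remove.yml -> tears down the kubenet units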