Diffstat (limited to 'roles')
22 files changed, 376 insertions, 157 deletions
diff --git a/roles/kubernetes/base/tasks/cri_containerd.yml b/roles/kubernetes/base/tasks/cri_containerd.yml
new file mode 100644
index 00000000..aa34e6fe
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_containerd.yml
@@ -0,0 +1,4 @@
+---
+- name: install containerd
+  include_role:
+    name: containerd
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
new file mode 100644
index 00000000..67196f51
--- /dev/null
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -0,0 +1,8 @@
+---
+- name: disable bridge and iptables in docker daemon config
+  set_fact:
+    docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+
+- name: install docker
+  include_role:
+    name: docker
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 9c91e347..f1802b0c 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -1,4 +1,7 @@
 ---
+- name: install container runtime
+  include_tasks: "cri_{{ kubernetes_container_runtime }}.yml"
+
 - name: prepare /var/lib/kubelet as LVM
   when: kubelet_lvm is defined
   import_tasks: lvm.yml
@@ -66,11 +69,11 @@
 - name: add dummy group with gid 998
   group:
     name: app
-    gid: 998
+    gid: 990
 
 - name: add dummy user with uid 998
   user:
     name: app
-    uid: 998
+    uid: 990
     group: app
     password: "!"
diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml
index 2d9b9eed..8e913560 100644
--- a/roles/kubernetes/kubeadm/base/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/main.yml
@@ -2,6 +2,8 @@
 - name: install kubeadm and kubectl
   apt:
     name:
+    - haproxy
+    - hatop
     - "kubeadm{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}"
     - "kubectl{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}"
     state: present
@@ -16,19 +18,43 @@
     selection: hold
 
 - name: set kubelet node-ip
+  when: kubernetes_overlay_node_ip is defined
   lineinfile:
     name: "/etc/default/kubelet"
     regexp: '^KUBELET_EXTRA_ARGS='
-    line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) | ipaddr(1) | ipaddr("address") }}'
+    line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_overlay_node_ip }}'
+    create: yes
 
-- name: add kubectl/kubeadm completion for shells
+- name: add kubeadm completion for shells
   loop:
   - zsh
   - bash
   blockinfile:
     path: "/root/.{{ item }}rc"
     create: yes
-    marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###"
+    marker: "### {mark} ANSIBLE MANAGED BLOCK for kubeadm ###"
     content: |
-      source <(kubectl completion {{ item }})
       source <(kubeadm completion {{ item }})
+
+- name: configure haproxy
+  template:
+    src: haproxy.cfg.j2
+    dest: /etc/haproxy/haproxy.cfg
+  register: haproxy_config
+
+- name: (re)start haproxy
+  systemd:
+    name: haproxy
+    state: "{% if haproxy_config is changed %}restarted{% else %}started{% endif %}"
+    enabled: yes
+
+- name: add hatop config for shells
+  loop:
+  - zsh
+  - bash
+  blockinfile:
+    path: "/root/.{{ item }}rc"
+    create: yes
+    marker: "### {mark} ANSIBLE MANAGED BLOCK for hatop ###"
+    content: |
+      alias hatop="hatop -s /var/run/haproxy/admin.sock"
diff --git a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
new file mode 100644
index 00000000..3de6ac00
--- /dev/null
+++ b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2
@@ -0,0 +1,36 @@
+global
+    log /dev/log local0
+    log /dev/log local1 notice
+    chroot /var/lib/haproxy
+    stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
+    stats timeout 30s
+    user haproxy
+    group haproxy
+    daemon
+
+frontend kube_api
+{% if '_kubernetes_masters_' in group_names %}
+    bind *:6443
+{% else %}
+    bind 127.0.0.1:6443
+{% endif %}
+    mode tcp
+    timeout client 3h
+    default_backend kube_api
+
+backend kube_api
+    mode tcp
+{% if '_kubernetes_masters_' in group_names %}
+    balance first
+{% else %}
+    balance roundrobin
+{% endif %}
+    option log-health-checks
+    option httpchk GET /healthz
+    http-check expect string ok
+    default-server inter 5s fall 3 rise 2
+    timeout connect 5s
+    timeout server 3h
+{% for master in groups['_kubernetes_masters_'] %}
+    server {{ hostvars[master].inventory_hostname }} {{ hostvars[master].kubernetes_overlay_node_ip | default(hostvars[master].ansible_default_ipv4.address) }}:6442 {% if master == inventory_hostname %}id 1{% endif %} check check-ssl verify none
+{% endfor %}
diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml
index 7cc6fe94..9af041b2 100644
--- a/roles/kubernetes/kubeadm/master/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/main.yml
@@ -1,67 +1,25 @@
 ---
-- name: check if kubeconfig admin.conf already exists
-  stat:
-    path: /etc/kubernetes/admin.conf
-  register: kubeconfig_admin_stats
+# - name: create directory for encryption config
+#   file:
+#     name: /etc/kubernetes/encryption
+#     state: directory
+#     mode: 0700
 
-### cluster not yet initialized
+# - name: install encryption config
+#   template:
+#     src: encryption-config.j2
+#     dest: /etc/kubernetes/encryption/config
+#     mode: 0600
 
-- name: create new cluster
-  when: kubeconfig_admin_stats.stat.exists == False
-  block:
-  - name: generate bootstrap token for new cluster
-    command: kubeadm token generate
-    changed_when: False
-    check_mode: no
-    register: kubeadm_token_generate
+- name: install primary master
+  include_tasks: primary-master.yml
+  when: "'_kubernetes_primary_master_' in group_names"
 
-  - name: create kubernetes config directory
-    file:
-      path: /etc/kubernetes
-      state: directory
+- name: install secondary masters
+  include_tasks: secondary-masters.yml
+  when: "'_kubernetes_primary_master_' not in group_names"
 
-  ## TODO test whether the generated cluster configs really works - since it has never been used...
-  - name: install cluster config for kubeadm
-    template:
-      src: kubeadm-cluster.config.j2
-      dest: /etc/kubernetes/kubeadm-cluster.config
-
-  - name: set up kubernetes master
-    command: "kubeadm init --config '/etc/kubernetes/kubeadm-cluster.config' --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
-    args:
-      creates: /etc/kubernetes/pki/ca.crt
-    register: kubeadm_init
-
-  - name: dump output of kubeadm init to log file
-    when: kubeadm_init.changed
-    copy:
-      content: "{{ kubeadm_init.stdout }}\n"
-      dest: /etc/kubernetes/kubeadm-init.log
-
-### cluster is already initialized
-
-- name: prepare cluster for new nodes
-  when: kubeconfig_admin_stats.stat.exists == True
-  block:
-
-  - name: fetch list of current nodes
-    command: kubectl get nodes -o name
-    changed_when: False
-    check_mode: no
-    register: kubectl_node_list
-
-  - name: save list of current nodes
-    set_fact:
-      kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
-
-  - name: create bootstrap token for existing cluster
-    when: kubernetes_nodes | difference(kubernetes_current_nodes) | length > 0
-    command: kubeadm token create --ttl 42m
-    check_mode: no
-    register: kubeadm_token_create
-
-##
 
 - name: check if master is tainted (1/2)
   command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
@@ -74,28 +32,13 @@
     kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
 
 - name: remove taint from master node
-  when: "kubernetes.dedicated_master == False and 'node-role.kubernetes.io/master' in kube_node_taints"
-  command: kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
+  when: not kubernetes.dedicated_master and 'node-role.kubernetes.io/master' in kube_node_taints
+  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
 
 - name: add taint for master node
-  when: "kubernetes.dedicated_master == True and 'node-role.kubernetes.io/master' not in kube_node_taints"
-  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ ansible_nodename }} node-role.kubernetes.io/master='':NoSchedule"
-
-- name: install openssl
-  apt:
-    name: openssl
-    state: present
+  when: kubernetes.dedicated_master and 'node-role.kubernetes.io/master' not in kube_node_taints
+  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
 
-- name: get ca certificate digest
-  shell: "openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'"
-  check_mode: no
-  register: kube_ca_openssl
-  changed_when: False
-
-- name: set variables needed by kubernetes/nodes to join the cluster
-  set_fact:
-    kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
-    kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
 
 - name: prepare kubectl (1/2)
   file:
@@ -107,3 +50,14 @@
     dest: /root/.kube/config
     src: /etc/kubernetes/admin.conf
     state: link
+
+- name: add kubectl completion config for shells
+  with_items:
+  - zsh
+  - bash
+  blockinfile:
+    path: "/root/.{{ item }}rc"
+    create: yes
+    marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###"
+    content: |
+      source <(kubectl completion {{ item }})
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
new file mode 100644
index 00000000..115c8616
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -0,0 +1,109 @@
+---
+- name: check if kubeconfig kubelet.conf already exists
+  stat:
+    path: /etc/kubernetes/kubelet.conf
+  register: kubeconfig_kubelet_stats
+
+- name: generate kubeadm.config
+  template:
+    src: kubeadm.config.j2
+    dest: /etc/kubernetes/kubeadm.config
+  register: kubeadm_config
+
+### cluster not yet initialized
+
+- name: create new cluster
+  when: not kubeconfig_kubelet_stats.stat.exists
+  block:
+
+  #### kubeadm wants the token to come from --config if --config is used
+  #### I think this is stupid -> TODO: send bug report
+  # - name: generate bootstrap token for new cluster
+  #   command: kubeadm token generate
+  #   changed_when: False
+  #   check_mode: no
+  #   register: kubeadm_token_generate
+
+  - name: initialize kubernetes master and store log
+    block:
+    - name: initialize kubernetes master
+      command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+      # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+      args:
+        creates: /etc/kubernetes/pki/ca.crt
+      register: kubeadm_init
+
+    always:
+    - name: dump output of kubeadm init to log file
+      when: kubeadm_init.changed
+      copy:
+        content: "{{ kubeadm_init.stdout }}\n"
+        dest: /etc/kubernetes/kubeadm-init.log
+
+  - name: create bootstrap token for existing cluster
+    command: kubeadm token create --ttl 42m
+    check_mode: no
+    register: kubeadm_token_generate
+
+
+### cluster is already initialized but config has changed
+
+- name: upgrade cluster config
+  when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed
+  block:
+
+  - name: fail for cluster upgrades
+    fail:
+      msg: "upgrading cluster config is currently not supported!"
+
+
+### cluster is already initialized
+
+- name: prepare cluster for new nodes
+  when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed
+  block:
+
+  - name: fetch list of current nodes
+    command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+    changed_when: False
+    check_mode: no
+    register: kubectl_node_list
+
+  - name: save list of current nodes
+    set_fact:
+      kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+  - name: create bootstrap token for existing cluster
+    when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
+    command: kubeadm token create --ttl 42m
+    check_mode: no
+    register: kubeadm_token_create
+
+
+## calculate certificate digest
+
+- name: install openssl
+  apt:
+    name: openssl
+    state: present
+
+- name: get ca certificate digest
+  shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'"
+  args:
+    executable: /bin/bash
+  check_mode: no
+  register: kube_ca_openssl
+  changed_when: False
+
+- name: set variables needed by kubernetes/nodes to join the cluster
+  set_fact:
+    kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
+    kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
+  delegate_to: "{{ item }}"
+  delegate_facts: True
+  loop: "{{ groups['_kubernetes_nodes_'] }}"
+
+## Network Plugin
+
+# - name: install network plugin
+#   include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
new file mode 100644
index 00000000..c00c3203
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -0,0 +1,48 @@
+---
+- name: fetch secrets needed for secondary master
+  run_once: true
+  delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+  block:
+
+  - name: fetch list of current nodes
+    command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+    changed_when: False
+    check_mode: no
+    register: kubectl_node_list
+
+  - name: save list of current nodes
+    set_fact:
+      kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+  - name: upload certs
+    when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0"
+    command: kubeadm init phase upload-certs --upload-certs
+    check_mode: no
+    register: kubeadm_upload_certs
+
+
+- name: extracting encryption key for certs
+  set_fact:
+    kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
+
+- name: join kubernetes secondary master node and store log
+  block:
+  - name: join kubernetes secondary master node
+    command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+    args:
+      creates: /etc/kubernetes/kubelet.conf
+    register: kubeadm_join
+
+  always:
+  - name: dump output of kubeadm join to log file
+    when: kubeadm_join is changed
+    # This is not a handler by design to make sure this action runs at this point of the play.
+    copy:  # noqa 503
+      content: "{{ kubeadm_join.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-join.log
+
+# TODO: actually check if the node has registered
+- name: give the new master(s) a moment to register
+  when: kubeadm_join is changed
+  pause:  # noqa 503
+    seconds: 5
diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
new file mode 100644
index 00000000..345c9bf9
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
@@ -0,0 +1,13 @@
+kind: EncryptionConfiguration
+apiVersion: apiserver.config.k8s.io/v1
+resources:
+  - resources:
+    - secrets
+    providers:
+    - secretbox:
+        keys:
+{% for key in kubernetes_secrets.encryption_config_keys %}
+        - name: key{{ loop.index }}
+          secret: {{ key }}
+{% endfor %}
+    - identity: {}
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2
deleted file mode 100644
index 07c4dddd..00000000
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #}
-apiVersion: kubeadm.k8s.io/v1beta1
-kind: ClusterConfiguration
-kubernetesVersion: v{{ kubernetes.version }}
-clusterName: {{ kubernetes.cluster_name }}
-certificatesDir: /etc/kubernetes/pki
-{% if kubernetes.api_advertise_ip %}
-controlPlaneEndpoint: "{{ kubernetes.api_advertise_ip }}:6443"
-{% endif %}
-imageRepository: k8s.gcr.io
-networking:
-  dnsDomain: cluster.local
-  podSubnet: {{ kubernetes.pod_ip_range }}
-  serviceSubnet: {{ kubernetes.service_ip_range }}
-etcd:
-  local:
-    dataDir: /var/lib/etcd
-apiServer:
-{% if kubernetes.api_extra_sans | length > 0 %}
-  certSANs:
-{% for san in kubernetes.api_extra_sans %}
-  - {{ san }}
-{% endfor %}
-{% endif %}
-  extraArgs:
-{% if kubernetes.api_advertise_ip %}
-    advertise-address: {{ kubernetes.api_advertise_ip }}
-{% endif %}
-    authorization-mode: Node,RBAC
-  timeoutForControlPlane: 4m0s
-controllerManager: {}
-scheduler: {}
-dns:
-  type: CoreDNS
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
new file mode 100644
index 00000000..f48a34f3
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -0,0 +1,45 @@
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
+{# #}
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+{# TODO: this is ugly but we want to create our own token so we can #}
+{#       better control its lifetime #}
+bootstrapTokens:
+- ttl: "1s"
+localAPIEndpoint:
+  bindPort: 6442
+{% if kubernetes_overlay_node_ip is defined %}
+  advertiseAddress: {{ kubernetes_overlay_node_ip }}
+{% endif %}
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+kubernetesVersion: {{ kubernetes_version }}
+clusterName: {{ kubernetes.cluster_name }}
+imageRepository: k8s.gcr.io
+controlPlaneEndpoint: 127.0.0.1:6443
+networking:
+  dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }}
+  podSubnet: {{ kubernetes.pod_ip_range }}
+  serviceSubnet: {{ kubernetes.service_ip_range }}
+apiServer:
+  # extraArgs:
+  #   encryption-provider-config: /etc/kubernetes/encryption/config
+  # extraVolumes:
+  # - name: encryption-config
+  #   hostPath: /etc/kubernetes/encryption
+  #   mountPath: /etc/kubernetes/encryption
+  #   readOnly: true
+  #   pathType: Directory
+{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %}
+  certSANs: []
+{% else %}
+  certSANs:
+  {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }}
+{% endif %}
+controllerManager:
+  extraArgs:
+    node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}"
+scheduler: {}
+dns:
+  type: CoreDNS
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index 9f0057f9..1d5178ea 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -1,18 +1,16 @@
 ---
-- name: get master vars
-  set_fact:
-    kube_bootstrap_token: "{{ hostvars[kubernetes_master].kube_bootstrap_token }}"
-    kube_bootstrap_ca_cert_hash: "{{ hostvars[kubernetes_master].kube_bootstrap_ca_cert_hash }}"
-    kube_master_addr: "{{ kubernetes.api_advertise_ip | default(hostvars[kubernetes_master].ansible_default_ipv4.address) }}"
+- name: join kubernetes node and store log
+  block:
+  - name: join kubernetes node
+    command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+    args:
+      creates: /etc/kubernetes/kubelet.conf
+    register: kubeadm_join
 
-- name: join kubernetes node
-  command: "kubeadm join --token {{ kube_bootstrap_token }} {{ kube_master_addr }}:6443 --discovery-token-ca-cert-hash {{ kube_bootstrap_ca_cert_hash }}"
-  args:
-    creates: /etc/kubernetes/kubelet.conf
-  register: kubeadm_join
-
-- name: dump output of kubeadm join to log file
-  when: kubeadm_join.changed
-  copy:
-    content: "{{ kubeadm_join.stdout }}\n"
-    dest: /etc/kubernetes/kubeadm-join.log
+  always:
+  - name: dump output of kubeadm join to log file
+    when: kubeadm_join is changed
+    # This is not a handler by design to make sure this action runs at this point of the play.
+    copy:  # noqa 503
+      content: "{{ kubeadm_join.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-join.log
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index a6d64c7d..f0e88e53 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -1,3 +1,13 @@
 ---
 - name: clean up settings and files created by kubeadm
   command: kubeadm reset -f
+
+- name: clean up extra configs and logs
+  loop:
+  - /etc/kubernetes/kubeadm.config
+  - /etc/kubernetes/kubeadm-init.log
+  - /etc/kubernetes/kubeadm-join.log
+  - /etc/kubernetes/pki
+  file:
+    path: "{{ item }}"
+    state: absent
diff --git a/roles/kubernetes/net/kubeguard/defaults/main.yml b/roles/kubernetes/net/kubeguard/defaults/main.yml
new file mode 100644
index 00000000..acabaa25
--- /dev/null
+++ b/roles/kubernetes/net/kubeguard/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+kubeguard_action: add
diff --git a/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service b/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
index f45df88a..35fc8f90 100644
--- a/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
+++ b/roles/kubernetes/net/kubeguard/files/kubeguard-interfaces.service
@@ -1,5 +1,5 @@
 [Unit]
-Description=Kubernetes Network Interfaces
+Description=Kubeguard Network Setup
 After=network.target
 
 [Service]
diff --git a/roles/kubernetes/net/kubeguard/meta/main.yml b/roles/kubernetes/net/kubeguard/meta/main.yml
deleted file mode 100644
index 39c7d694..00000000
--- a/roles/kubernetes/net/kubeguard/meta/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-dependencies:
-- role: wireguard/base
-  when: kubeguard_remove_node is not defined
diff --git a/roles/kubernetes/net/kubeguard/tasks/add.yml b/roles/kubernetes/net/kubeguard/tasks/add.yml
index b604302b..0658b42c 100644
--- a/roles/kubernetes/net/kubeguard/tasks/add.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/add.yml
@@ -1,4 +1,8 @@
 ---
+- name: install wireguard
+  import_role:
+    name: wireguard/base
+
 - name: create network config directory
   file:
     name: /var/lib/kubeguard/
@@ -48,7 +52,7 @@
 
 - name: compute list of peers to be added
   set_fact:
-    kubeguard_peers_to_add: "{{ kubernetes_nodes | difference(inventory_hostname) }}"
+    kubeguard_peers_to_add: "{{ groups['_kubernetes_nodes_'] | difference(inventory_hostname) }}"
 
 - name: compute list of peers to be removed
   set_fact:
@@ -87,7 +91,7 @@
 - name: enable IPv4 forwarding
   sysctl:
     name: net.ipv4.ip_forward
-    value: 1
+    value: '1'
     sysctl_set: yes
     state: present
     reload: yes
diff --git a/roles/kubernetes/net/kubeguard/tasks/main.yml b/roles/kubernetes/net/kubeguard/tasks/main.yml
index 0e87af11..10b0d547 100644
--- a/roles/kubernetes/net/kubeguard/tasks/main.yml
+++ b/roles/kubernetes/net/kubeguard/tasks/main.yml
@@ -1,8 +1,3 @@
 ---
-- name: add node to overlay network
-  include_tasks: add.yml
-  when: kubeguard_remove_node is not defined
-
-- name: remove node from overlay network
-  include_tasks: remove.yml
-  when: kubeguard_remove_node is defined
+- name: add/remove nodes to overlay network
+  include_tasks: "{{ kubeguard_action }}.yml"
diff --git a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2 b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
index 87849ee9..98b38cf4 100644
--- a/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
+++ b/roles/kubernetes/net/kubeguard/templates/ifupdown.sh.j2
@@ -8,14 +8,14 @@ INET_IF="{{ ansible_default_ipv4.interface }}"
 
 POD_NET_CIDR="{{ kubernetes.pod_ip_range }}"
 
-{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) -%}
+{% set br_net = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) -%}
 BR_IF="kube-br0"
 BR_IP="{{ br_net | ipaddr(1) | ipaddr('address') }}"
 BR_IP_CIDR="{{ br_net | ipaddr(1) }}"
 BR_NET_CIDR="{{ br_net }}"
 
 TUN_IF="kube-wg0"
-TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[inventory_hostname]) }}"
+TUN_IP_CIDR="{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[inventory_hostname]) }}"
 
 
 case "$1" in
diff --git a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2 b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
index f457ed1c..65b1357a 100644
--- a/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
+++ b/roles/kubernetes/net/kubeguard/templates/k8s.json.j2
@@ -7,6 +7,6 @@
   "hairpinMode": true,
   "ipam": {
     "type": "host-local",
-    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[inventory_hostname]) }}"
+    "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}"
   }
 }
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 54251caf..9ca444e8 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,14 +4,15 @@ After=network.target
 Requires=kubeguard-interfaces.service
 After=kubeguard-interfaces.service
 
-{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubernetes.net_index[peer]) -%}
-{% set direct_zone = kubernetes.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
+{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%}
+{% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
+{% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
 {% if direct_zone %}
-{% set direct_ip = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[inventory_hostname]) %}
-{% set direct_interface = kubernetes.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
-{% set direct_ip_peer = kubernetes.direct_net_zones[direct_zone].transfer_net | ipaddr(kubernetes.net_index[peer]) %}
+{% set direct_ip = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[inventory_hostname]) %}
+{% set direct_interface = kubeguard.direct_net_zones[direct_zone].node_interface[inventory_hostname] %}
+{% set direct_ip_peer = kubeguard.direct_net_zones[direct_zone].transfer_net | ipaddr(kubeguard.node_index[peer]) %}
 {% else %}
-{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubernetes.net_index[peer]) -%}
+{% set tun_ip = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, 0) | ipaddr(kubeguard.node_index[peer]) -%}
 {% set wg_pubkey = hostvars[peer].kubeguard_wireguard_pubkey.stdout -%}
 {% set wg_host = hostvars[peer].external_ip | default(hostvars[peer].ansible_default_ipv4.address) -%}
 {% set wg_port = hostvars[peer].kubeguard_wireguard_port -%}
@@ -22,7 +23,7 @@ Type=oneshot
 {% if direct_zone %}
 ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }}
 ExecStart=/sbin/ip link set up dev {{ direct_interface }}
-ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }}
+ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }}
 ExecStop=/sbin/ip route del {{ pod_net_peer }}
 ExecStop=/sbin/ip link set down dev {{ direct_interface }}
 ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }}
diff --git a/roles/kubernetes/standalone/templates/kubelet.service.override.j2 b/roles/kubernetes/standalone/templates/kubelet.service.override.j2
index 3a88ccd2..75061e73 100644
--- a/roles/kubernetes/standalone/templates/kubelet.service.override.j2
+++ b/roles/kubernetes/standalone/templates/kubelet.service.override.j2
@@ -6,4 +6,5 @@ ExecStart=/usr/bin/kubelet \
     --container-runtime=remote \
     --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
 {% endif %}
+    --network-plugin=cni \
     --cloud-provider=