From ddc8db7956cbf68afb1bb49401827e9b55ab139f Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Sat, 11 Jan 2020 03:35:03 +0100
Subject: kubernetes: new/updated kubeadm master role (WIP)

---
 .../kubeadm/master/tasks/primary-master.yml | 120 +++++++++++++++++++++
 1 file changed, 120 insertions(+)
 create mode 100644 roles/kubernetes/kubeadm/master/tasks/primary-master.yml

diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
new file mode 100644
index 00000000..58658794
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -0,0 +1,120 @@
+---
+- name: check if kubeconfig kubelet.conf already exists
+  stat:
+    path: /etc/kubernetes/kubelet.conf
+  register: kubeconfig_kubelet_stats
+
+- name: generate kubeadm.config
+  template:
+    src: kubeadm.config.j2
+    dest: /etc/kubernetes/kubeadm.config
+  register: kubeadm_config
+
+### cluster not yet initialized
+
+- name: create new cluster
+  when: not kubeconfig_kubelet_stats.stat.exists
+  block:
+
+  #### kubeadm wants token to come from --config if --config is used
+  #### I think this is stupid -> TODO: send bug report
+  # - name: generate bootstrap token for new cluster
+  #   command: kubeadm token generate
+  #   changed_when: False
+  #   check_mode: no
+  #   register: kubeadm_token_generate
+
+  - name: initialize kubernetes master
+    command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+#    command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+    args:
+      creates: /etc/kubernetes/pki/ca.crt
+    register: kubeadm_init
+
+  - name: dump output of kubeadm init to log file
+    when: kubeadm_init.changed
+    copy:
+      content: "{{ kubeadm_init.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-init.log
+
+  - name: create bootstrap token for existing cluster
+    command: kubeadm token create --ttl 42m
+    check_mode: no
+    register: kubeadm_token_generate
+
+### cluster is already initialized but config has changed
+
+- name: upgrade cluster config
+  when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed
+  block:
+
+  - name: fail for cluster upgrades
+    fail:
+      msg: "upgrading cluster config is currently not supported!"
+
+
+### cluster is already initialized
+
+- name: prepare cluster for new nodes
+  when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed
+  block:
+
+  - name: fetch list of current nodes
+    command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+    changed_when: False
+    check_mode: no
+    register: kubectl_node_list
+
+  - name: save list of current nodes
+    set_fact:
+      kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+  - name: create bootstrap token for existing cluster
+    when: "groups['_kubernetes_nodes_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0"
+    command: kubeadm token create --ttl 42m
+    check_mode: no
+    register: kubeadm_token_create
+
+##
+
+## this fixes the kubelet kubeconfig to make use of certificate rotation. This is a bug in
+## kubeadm init which was fixed with the 1.17 release. TODO: remove this once all clusters have been
+## upgraded to 1.17 or newer.
+- name: fix kubeconfig of kubelet
+  lineinfile:
+    path: /etc/kubernetes/kubelet.conf
+    backrefs: yes
+    regexp: '^(\s*)client-{{ item }}(-data)?:'
+    line: '\1client-{{ item }}: /var/lib/kubelet/pki/kubelet-client-current.pem'
+  with_items:
+  - certificate
+  - key
+  notify: restart kubelet
+
+
+- name: install openssl
+  apt:
+    name: openssl
+    state: present
+
+- name: get ca certificate digest
+  shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'"
+  args:
+    executable: /bin/bash
+  check_mode: no
+  register: kube_ca_openssl
+  changed_when: False
+
+- name: set variables needed by kubernetes/nodes to join the cluster
+  set_fact:
+    kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
+    kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
+  delegate_to: "{{ item }}"
+  delegate_facts: True
+  loop: "{{ groups['_kubernetes_nodes_'] }}"
+
+## Network Plugin
+
+# - name: install network plugin
+#   include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
-- 
cgit v1.2.3

From cd946c702fea849b06e0fd6a19ef5597235caf55 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Fri, 17 Jan 2020 17:46:08 +0100
Subject: single master kubernetes cluster works now

---
 common/kubernetes.yml                                  | 18 +++++++++---------
 inventory/group_vars/k8s-test-2019vm/main.yml          |  4 ++--
 inventory/group_vars/k8s-test-atlas/main.yml           |  4 ++--
 inventory/group_vars/k8s-test/main.yml                 |  8 +++++++-
 .../kubernetes/kubeadm/master/tasks/primary-master.yml | 17 +----------------
 .../kubeadm/master/templates/kubeadm.config.j2         |  4 ++--
 roles/kubernetes/kubeadm/node/tasks/main.yml           |  2 +-
 spreadspace/k8s-test.yml                               | 12 ++++++------
 8 files changed, 30 insertions(+), 39 deletions(-)

diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index c4f3f81e..aaf23219 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -45,14 +45,14 @@
   roles:
     - role: kubernetes/kubeadm/master
 
-# - name: configure kubernetes secondary masters
-#   hosts: _kubernetes_masters_:!_kubernetes_primary_master_
-#   roles:
-#     - role: kubernetes/kubeadm/master
-
-# - name: configure kubernetes non-master nodes
-#   hosts: _kubernetes_nodes_:!_kubernetes_masters_
-#   roles:
-#     - role: kubernetes/kubeadm/node
+- name: configure kubernetes secondary masters
+  hosts: _kubernetes_masters_:!_kubernetes_primary_master_
+  roles:
+    - role: kubernetes/kubeadm/master
+
+- name: configure kubernetes non-master nodes
+  hosts: _kubernetes_nodes_:!_kubernetes_masters_
+  roles:
+    - role: kubernetes/kubeadm/node
 
 ### TODO: add node labels (i.e. for ingress daemonset)
diff --git a/inventory/group_vars/k8s-test-2019vm/main.yml b/inventory/group_vars/k8s-test-2019vm/main.yml
index 2cbe5be1..4c08a1bb 100644
--- a/inventory/group_vars/k8s-test-2019vm/main.yml
+++ b/inventory/group_vars/k8s-test-2019vm/main.yml
@@ -4,7 +4,7 @@ vm_host: sk-2019vm
 install:
   host: "{{ vm_host }}"
   mem: 1024
-  numcpu: 1
+  numcpu: 2
   disks:
     primary: /dev/sda
     scsi:
@@ -12,7 +12,7 @@ install:
         type: zfs
         pool: storage
         name: "{{ inventory_hostname }}"
-        size: 5g
+        size: 10g
   interfaces:
     - bridge: br-public
       name: primary0
diff --git a/inventory/group_vars/k8s-test-atlas/main.yml b/inventory/group_vars/k8s-test-atlas/main.yml
index 4212cf5e..9838513d 100644
--- a/inventory/group_vars/k8s-test-atlas/main.yml
+++ b/inventory/group_vars/k8s-test-atlas/main.yml
@@ -6,7 +6,7 @@ vm_host: ch-atlas
 install:
   host: "{{ vm_host }}"
   mem: 1024
-  numcpu: 1
+  numcpu: 2
   disks:
     primary: /dev/sda
     scsi:
@@ -14,7 +14,7 @@ install:
         type: lvm
         vg: "{{ hostvars[vm_host].host_name }}"
         lv: "{{ inventory_hostname }}"
-        size: 5g
+        size: 10g
   interfaces:
     - bridge: br-public
       name: primary0
diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index e1b6570f..0d4d0857 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -1,5 +1,11 @@
 ---
-kubernetes_version: 1.16.4
+containerd_lvm:
+  vg: "{{ host_name }}"
+  lv: containerd
+  size: 4G
+  fs: ext4
+
+kubernetes_version: 1.17.1
 kubernetes_container_runtime: containerd
 kubernetes_network_plugin: kubeguard
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index 58658794..5efc91b5 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -53,7 +53,6 @@
     fail:
       msg: "upgrading cluster config is currently not supported!"
 
-
 ### cluster is already initialized
 
 - name: prepare cluster for new nodes
@@ -76,22 +75,8 @@
     check_mode: no
     register: kubeadm_token_create
 
-##
-
-## this fixes the kubelet kubeconfig to make use of certificate rotation. This is a bug in
-## kubeadm init which was fixed with the 1.17 release. TODO: remove this once all clusters have been
-## upgraded to 1.17 or newer.
-- name: fix kubeconfig of kubelet
-  lineinfile:
-    path: /etc/kubernetes/kubelet.conf
-    backrefs: yes
-    regexp: '^(\s*)client-{{ item }}(-data)?:'
-    line: '\1client-{{ item }}: /var/lib/kubelet/pki/kubelet-client-current.pem'
-  with_items:
-  - certificate
-  - key
-  notify: restart kubelet
 
+## calculate certificate digest
 
 - name: install openssl
   apt:
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
index e03ea6f6..3c10e59b 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -1,13 +1,13 @@
 {# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #}
 {# #}
-apiVersion: kubeadm.k8s.io/v1beta1
+apiVersion: kubeadm.k8s.io/v1beta2
 kind: InitConfiguration
 {# TODO: this is ugly but we want to create our own token so we can #}
 {#       better control its lifetime #}
 bootstrapTokens:
 - ttl: "1s"
 ---
-apiVersion: kubeadm.k8s.io/v1beta1
+apiVersion: kubeadm.k8s.io/v1beta2
 kind: ClusterConfiguration
 kubernetesVersion: {{ kubernetes_version }}
 clusterName: {{ kubernetes.cluster_name }}
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index 2a140099..dba2ce30 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: join kubernetes node
-  command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+  command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
   args:
     creates: /etc/kubernetes/kubelet.conf
   register: kubeadm_join
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
index 27599556..ed56cb78 100644
--- a/spreadspace/k8s-test.yml
+++ b/spreadspace/k8s-test.yml
@@ -1,10 +1,10 @@
 ---
-- name: Basic Node Setup
-  hosts: k8s-test
-  roles:
-    - role: base
-    - role: sshd
-    - role: zsh
+# - name: Basic Node Setup
+#   hosts: k8s-test
+#   roles:
+#     - role: base
+#     - role: sshd
+#     - role: zsh
 
 - import_playbook: ../common/kubernetes.yml
   vars:
-- 
cgit v1.2.3

From bb9a03e136bd8d1029bfb2c1cf0be22d28df1576 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Fri, 17 Jan 2020 19:53:12 +0100
Subject: kubernetes: node cleanup works now

---
 common/kubernetes-cleanup.yml                               |  7 ++-----
 common/kubernetes.yml                                       |  2 --
 roles/kubernetes/kubeadm/master/tasks/main.yml              |  6 +++---
 roles/kubernetes/kubeadm/master/tasks/primary-master.yml    |  4 ++--
 roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml |  4 ++--
 roles/kubernetes/kubeadm/node/tasks/main.yml                |  2 +-
 roles/kubernetes/kubeadm/reset/tasks/main.yml               | 10 ++++++++++
 spreadspace/k8s-test.yml                                    |  5 ++++-
 8 files changed, 24 insertions(+), 16 deletions(-)

diff --git a/common/kubernetes-cleanup.yml b/common/kubernetes-cleanup.yml
index a320e0f8..be55d11e 100644
--- a/common/kubernetes-cleanup.yml
+++ b/common/kubernetes-cleanup.yml
@@ -1,7 +1,4 @@
 ---
-- import_playbook: kubernetes-cluster-layout.yml
-
-########
-
 - name: check for nodes to be removed
   hosts: _kubernetes_primary_master_
   tasks:
@@ -15,7 +12,7 @@
       loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(groups['_kubernetes_nodes_']) }}"
       add_host:
         name: "{{ item }}"
-        inventory_dir: "{{ inventory_dir }}"
+        inventory_dir: "{{ hostvars[item].inventory_dir }}"
        group: _kubernetes_nodes_remove_
       changed_when: False
@@ -28,7 +25,7 @@
   roles:
     - role: kubernetes/kubeadm/reset
     - role: kubernetes/net/kubeguard
-      when: kubernetes_network_plugin == 'kubeguard'
+      when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard'
       vars:
         kubeguard_action: remove
diff --git a/common/kubernetes.yml b/common/kubernetes.yml
index aaf23219..4fc8cef2 100644
--- a/common/kubernetes.yml
+++ b/common/kubernetes.yml
@@ -1,6 +1,4 @@
 ---
-- import_playbook: kubernetes-cluster-layout.yml
-
 - name: prepare variables and do some sanity checks
   hosts: _kubernetes_nodes_
   gather_facts: no
diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml
index 7f96ff6a..9af041b2 100644
--- a/roles/kubernetes/kubeadm/master/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/main.yml
@@ -22,7 +22,7 @@
 
 
 - name: check if master is tainted (1/2)
-  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ host_name }} -o json"
+  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
   check_mode: no
   register: kubectl_get_node
   changed_when: False
@@ -33,11 +33,11 @@
 
 - name: remove taint from master node
   when: not kubernetes.dedicated_master and 'node-role.kubernetes.io/master' in kube_node_taints
-  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ host_name }} node-role.kubernetes.io/master-"
+  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
 
 - name: add taint for master node
   when: kubernetes.dedicated_master and 'node-role.kubernetes.io/master' not in kube_node_taints
-  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ host_name }} node-role.kubernetes.io/master='':NoSchedule"
+  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
 
 
 - name: prepare kubectl (1/2)
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index 5efc91b5..e814e847 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -25,7 +25,7 @@
   #   register: kubeadm_token_generate
 
   - name: initialize kubernetes master
-    command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+    command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
 #    command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
     args:
       creates: /etc/kubernetes/pki/ca.crt
@@ -70,7 +70,7 @@
       kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
 
   - name: create bootstrap token for existing cluster
-    when: "groups['_kubernetes_nodes_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0"
+    when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
     command: kubeadm token create --ttl 42m
     check_mode: no
     register: kubeadm_token_create
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index fc85a37d..7025ace0 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -15,7 +15,7 @@
     kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
 
 - name: upload certs
-  when: "groups['_kubernetes_masters_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0"
+  when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0"
   command: kubeadm init phase upload-certs --upload-certs
   check_mode: no
   register: kubeadm_upload_certs
@@ -26,7 +26,7 @@
     kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
 
 - name: join kubernetes secondary master node
-  command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+  command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
   args:
     creates: /etc/kubernetes/kubelet.conf
   register: kubeadm_join
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index dba2ce30..f7efdd81 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: join kubernetes node
-  command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+  command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
   args:
     creates: /etc/kubernetes/kubelet.conf
   register: kubeadm_join
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index a6d64c7d..f0e88e53 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -1,3 +1,13 @@
 ---
 - name: clean up settings and files created by kubeadm
   command: kubeadm reset -f
+
+- name: clean up extra configs and logs
+  loop:
+  - /etc/kubernetes/kubeadm.config
+  - /etc/kubernetes/kubeadm-init.log
+  - /etc/kubernetes/kubeadm-join.log
+  - /etc/kubernetes/pki
+  file:
+    path: "{{ item }}"
+    state: absent
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
index ed56cb78..97daa5b0 100644
--- a/spreadspace/k8s-test.yml
+++ b/spreadspace/k8s-test.yml
@@ -6,9 +6,12 @@
 #     - role: sshd
 #     - role: zsh
 
-- import_playbook: ../common/kubernetes.yml
+- import_playbook: ../common/kubernetes-cluster-layout.yml
   vars:
     kubernetes_cluster_layout:
       nodes_group: k8s-test
       masters:
         - s2-k8s-test0
+
+- import_playbook: ../common/kubernetes.yml
+- import_playbook: ../common/kubernetes-cleanup.yml
-- 
cgit v1.2.3

From 8010f57a73885f7abb5c98c1f77c49baa59a7d16 Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Fri, 17 Jan 2020 22:24:09 +0100
Subject: kubernetes: multi master cluster works now

---
 inventory/group_vars/k8s-test/main.yml             |  3 +--
 .../kubeadm/master/tasks/primary-master.yml        | 30 ++++++++++++----------
 .../kubeadm/master/tasks/secondary-masters.yml     | 27 ++++++++++---------
 .../kubeadm/master/templates/kubeadm.config.j2     | 11 +++++---
 roles/kubernetes/kubeadm/node/tasks/main.yml       | 25 ++++++++++--------
 .../kubeguard/templates/kubeguard-peer.service.j2  |  3 ++-
 spreadspace/k8s-test.yml                           |  3 +++
 7 files changed, 60 insertions(+), 42 deletions(-)

diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml
index 0d4d0857..b5863ad1 100644
--- a/inventory/group_vars/k8s-test/main.yml
+++ b/inventory/group_vars/k8s-test/main.yml
@@ -14,6 +14,7 @@ kubernetes:
   dedicated_master: False
 
   api_extra_sans:
+    - 89.106.215.23
     - k8s-test.spreadspace.org
 
   pod_ip_range: 172.18.0.0/16
@@ -25,8 +26,6 @@ kubernetes:
 
 kubeguard:
-  kube_router_version: 0.4.0-rc1
-
   ## node_index must be in the range between 1 and 190 -> 189 hosts possible
   ##
   ## hardcoded hostnames are not nice but if we do this via host_vars
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index e814e847..115c8616 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -24,35 +24,39 @@
   #   register: kubeadm_token_generate
 
-  - name: initialize kubernetes master
-    command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
-#    command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
-    args:
-      creates: /etc/kubernetes/pki/ca.crt
-    register: kubeadm_init
-
-  - name: dump output of kubeadm init to log file
-    when: kubeadm_init.changed
-    copy:
-      content: "{{ kubeadm_init.stdout }}\n"
-      dest: /etc/kubernetes/kubeadm-init.log
+  - name: initialize kubernetes master and store log
+    block:
+    - name: initialize kubernetes master
+      command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+      # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+      args:
+        creates: /etc/kubernetes/pki/ca.crt
+      register: kubeadm_init
+
+    always:
+    - name: dump output of kubeadm init to log file
+      when: kubeadm_init.changed
+      copy:
+        content: "{{ kubeadm_init.stdout }}\n"
+        dest: /etc/kubernetes/kubeadm-init.log
 
   - name: create bootstrap token for existing cluster
     command: kubeadm token create --ttl 42m
     check_mode: no
     register: kubeadm_token_generate
 
+
 ### cluster is already initialized but config has changed
 
 - name: upgrade cluster config
   when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed
   block:
-
   - name: fail for cluster upgrades
     fail:
       msg: "upgrading cluster config is currently not supported!"
 
+
 ### cluster is already initialized
 
 - name: prepare cluster for new nodes
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index 7025ace0..ffe1b4b2 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -25,18 +25,21 @@
   set_fact:
     kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
 
-- name: join kubernetes secondary master node
-  command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
-  args:
-    creates: /etc/kubernetes/kubelet.conf
-  register: kubeadm_join
-
-- name: dump output of kubeadm join to log file
-  when: kubeadm_join is changed
-  # This is not a handler by design to make sure this action runs at this point of the play.
-  copy:  # noqa 503
-    content: "{{ kubeadm_join.stdout }}\n"
-    dest: /etc/kubernetes/kubeadm-join.log
+- name: join kubernetes secondary master node and store log
+  block:
+  - name: join kubernetes secondary master node
+    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_kubelet_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_kubelet_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+    args:
+      creates: /etc/kubernetes/kubelet.conf
+    register: kubeadm_join
+
+  always:
+  - name: dump output of kubeadm join to log file
+    when: kubeadm_join is changed
+    # This is not a handler by design to make sure this action runs at this point of the play.
+    copy:  # noqa 503
+      content: "{{ kubeadm_join.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-join.log
 
 # TODO: actually check if node has registered
 - name: give the new master(s) a moment to register
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
index 3c10e59b..869c809f 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -1,4 +1,4 @@
-{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #}
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
 {# #}
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: InitConfiguration
@@ -6,20 +6,25 @@ kind: InitConfiguration
 {#       better control its lifetime #}
 bootstrapTokens:
 - ttl: "1s"
+{% if kubernetes_kubelet_node_ip is defined %}
+localAPIEndpoint:
+  advertiseAddress: {{ kubernetes_kubelet_node_ip }}
+{% endif %}
 ---
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: ClusterConfiguration
 kubernetesVersion: {{ kubernetes_version }}
 clusterName: {{ kubernetes.cluster_name }}
 imageRepository: k8s.gcr.io
+{% if kubernetes_kubelet_node_ip is defined %}
 controlPlaneEndpoint: "{{ kubernetes_kubelet_node_ip }}:6443"
+{% endif %}
 networking:
   dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }}
   podSubnet: {{ kubernetes.pod_ip_range }}
   serviceSubnet: {{ kubernetes.service_ip_range }}
 apiServer:
-  extraArgs:
-    advertise-address: {{ kubernetes_kubelet_node_ip }}
+  #extraArgs:
   #   encryption-provider-config: /etc/kubernetes/encryption/config
   # extraVolumes:
   # - name: encryption-config
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index f7efdd81..61d47111 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -1,13 +1,16 @@
 ---
-- name: join kubernetes node
-  command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
-  args:
-    creates: /etc/kubernetes/kubelet.conf
-  register: kubeadm_join
+- name: join kubernetes node and store log
+  block:
+  - name: join kubernetes node
+    command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+    args:
+      creates: /etc/kubernetes/kubelet.conf
+    register: kubeadm_join
+
+  always:
+  - name: dump output of kubeadm join to log file
+    when: kubeadm_join is changed
+    # This is not a handler by design to make sure this action runs at this point of the play.
+    copy:  # noqa 503
+      content: "{{ kubeadm_join.stdout }}\n"
+      dest: /etc/kubernetes/kubeadm-join.log
diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
index 6f36b571..9ca444e8 100644
--- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
+++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2
@@ -4,6 +4,7 @@ After=network.target
 Requires=kubeguard-interfaces.service
 After=kubeguard-interfaces.service
 
+{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%}
 {% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%}
 {% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%}
 {% if direct_zone %}
@@ -22,7 +23,7 @@ Type=oneshot
 {% if direct_zone %}
 ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }}
 ExecStart=/sbin/ip link set up dev {{ direct_interface }}
-ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }}
+ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }}
 ExecStop=/sbin/ip route del {{ pod_net_peer }}
 ExecStop=/sbin/ip link set down dev {{ direct_interface }}
 ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }}
diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml
index 97daa5b0..f21b3fae 100644
--- a/spreadspace/k8s-test.yml
+++ b/spreadspace/k8s-test.yml
@@ -12,6 +12,9 @@
       nodes_group: k8s-test
       masters:
         - s2-k8s-test0
+        - s2-k8s-test1
+        - s2-k8s-test2
+      primary_master: s2-k8s-test0
 
 - import_playbook: ../common/kubernetes.yml
 - import_playbook: ../common/kubernetes-cleanup.yml
-- 
cgit v1.2.3