From 25a978d8ce30bdbb62a0a82443501a4f0d2d6cc2 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 11 Jan 2020 03:03:17 +0100 Subject: kubernetes: base installation works now --- roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'roles/kubernetes/kubeadm/master') diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 index 07c4dddd..5ec18614 100644 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 @@ -1,7 +1,7 @@ {# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration -kubernetesVersion: v{{ kubernetes.version }} +kubernetesVersion: v{{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} certificatesDir: /etc/kubernetes/pki {% if kubernetes.api_advertise_ip %} -- cgit v1.2.3 From ddc8db7956cbf68afb1bb49401827e9b55ab139f Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 11 Jan 2020 03:35:03 +0100 Subject: kubernetes: new/updated kubeadm master role (WIP) --- common/kubernetes.yml | 6 +- roles/kubernetes/kubeadm/base/tasks/main.yml | 5 +- roles/kubernetes/kubeadm/master/tasks/main.yml | 110 ++++++------------- .../kubeadm/master/tasks/primary-master.yml | 120 +++++++++++++++++++++ .../kubeadm/master/tasks/secondary-masters.yml | 45 ++++++++ .../kubeadm/master/templates/encryption-config.j2 | 13 +++ .../master/templates/kubeadm-cluster.config.j2 | 39 ++++--- 7 files changed, 240 insertions(+), 98 deletions(-) create mode 100644 roles/kubernetes/kubeadm/master/tasks/primary-master.yml create mode 100644 roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml create mode 100644 roles/kubernetes/kubeadm/master/templates/encryption-config.j2 (limited to 'roles/kubernetes/kubeadm/master') diff --git a/common/kubernetes.yml b/common/kubernetes.yml index 96b39e5a..4a9cf65a 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -85,14 +85,12 @@ # - name: configure kubernetes primary master # hosts: _kubernetes_primary_master_ # roles: -# - role: kubernetes/kubeadm/master/common -# - role: kubernetes/kubeadm/master/primary +# - role: kubernetes/kubeadm/master # - name: configure kubernetes secondary masters # hosts: _kubernetes_masters_:!_kubernetes_primary_master_ # roles: -# - role: kubernetes/kubeadm/master/common -# - role: kubernetes/kubeadm/master/secondary +# - role: kubernetes/kubeadm/master # - name: configure kubernetes non-master nodes # hosts: _kubernetes_nodes_:!_kubernetes_masters_ diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml index 414fb67a..37944915 100644 --- a/roles/kubernetes/kubeadm/base/tasks/main.yml +++ b/roles/kubernetes/kubeadm/base/tasks/main.yml @@ -23,14 +23,13 @@ line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_kubelet_node_ip }}' create: yes -- name: add kubectl/kubeadm completion for shells +- name: add kubeadm completion for shells loop: - zsh - bash blockinfile: path: "/root/.{{ item }}rc" create: yes - marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl/kubeadm ###" + marker: "### {mark} ANSIBLE MANAGED BLOCK for kubeadm ###" content: | - source <(kubectl completion {{ item }}) source <(kubeadm completion {{ item }}) diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml 
b/roles/kubernetes/kubeadm/master/tasks/main.yml index 7cc6fe94..9ffdbeee 100644 --- a/roles/kubernetes/kubeadm/master/tasks/main.yml +++ b/roles/kubernetes/kubeadm/master/tasks/main.yml @@ -1,70 +1,28 @@ --- -- name: check if kubeconfig admin.conf already exists - stat: - path: /etc/kubernetes/admin.conf - register: kubeconfig_admin_stats - -### cluster not yet initialized - -- name: create new cluster - when: kubeconfig_admin_stats.stat.exists == False - block: - - - name: generate bootstrap token for new cluster - command: kubeadm token generate - changed_when: False - check_mode: no - register: kubeadm_token_generate - - - name: create kubernetes config directory - file: - path: /etc/kubernetes - state: directory - - ## TODO test whether the generated cluster configs really works - since it has never been used... - - name: install cluster config for kubeadm - template: - src: kubeadm-cluster.config.j2 - dest: /etc/kubernetes/kubeadm-cluster.config - - - name: set up kubernetes master - command: "kubeadm init --config '/etc/kubernetes/kubeadm-cluster.config' --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" - args: - creates: /etc/kubernetes/pki/ca.crt - register: kubeadm_init - - - name: dump output of kubeadm init to log file - when: kubeadm_init.changed - copy: - content: "{{ kubeadm_init.stdout }}\n" - dest: /etc/kubernetes/kubeadm-init.log - -### cluster is already initialized +- name: create direcotry for encryption config + file: + name: /etc/kubernetes/encryption + state: directory + mode: 0700 -- name: prepare cluster for new nodes - when: kubeconfig_admin_stats.stat.exists == True - block: +- name: install encryption config + template: + src: encryption-config.j2 + dest: /etc/kubernetes/encryption/config + mode: 0600 - - name: fetch list of current nodes - command: kubectl get nodes -o name - changed_when: False - check_mode: no - register: kubectl_node_list - - name: save list of current nodes - set_fact: - kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" +- name: install primary master + include_tasks: primary-master.yml + when: "'_kubernetes_primary_master_' in group_names" - - name: create bootstrap token for existing cluster - when: kubernetes_nodes | difference(kubernetes_current_nodes) | length > 0 - command: kubeadm token create --ttl 42m - check_mode: no - register: kubeadm_token_create +- name: install secondary masters + include_tasks: secondary-masters.yml + when: "'_kubernetes_primary_master_' not in group_names" -## - name: check if master is tainted (1/2) - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ host_name }} -o json" check_mode: no register: kubectl_get_node changed_when: False @@ -74,28 +32,13 @@ kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}" - name: remove taint from master node - when: "kubernetes.dedicated_master == False and 'node-role.kubernetes.io/master' in kube_node_taints" - command: kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master- + when: not kubernetes.dedicated_master and 'node-role.kubernetes.io/master' in kube_node_taints + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ host_name }} node-role.kubernetes.io/master-" - name: 
add taint for master node - when: "kubernetes.dedicated_master == True and 'node-role.kubernetes.io/master' not in kube_node_taints" - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ ansible_nodename }} node-role.kubernetes.io/master='':NoSchedule" - -- name: install openssl - apt: - name: openssl - state: present - -- name: get ca certificate digest - shell: "openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'" - check_mode: no - register: kube_ca_openssl - changed_when: False + when: kubernetes.dedicated_master and 'node-role.kubernetes.io/master' not in kube_node_taints + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ host_name }} node-role.kubernetes.io/master='':NoSchedule" -- name: set variables needed by kubernetes/nodes to join the cluster - set_fact: - kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}" - kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}" - name: prepare kubectl (1/2) file: @@ -107,3 +50,14 @@ dest: /root/.kube/config src: /etc/kubernetes/admin.conf state: link + +- name: add kubectl completion config for shells + with_items: + - zsh + - bash + blockinfile: + path: "/root/.{{ item }}rc" + create: yes + marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###" + content: | + source <(kubectl completion {{ item }}) diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml new file mode 100644 index 00000000..58658794 --- /dev/null +++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml @@ -0,0 +1,120 @@ +--- +- name: check if kubeconfig kubelet.conf already exists + stat: + path: /etc/kubernetes/kubelet.conf + register: kubeconfig_kubelet_stats + +- name: generate kubeadm.config + template: + src: kubeadm.config.j2 + dest: /etc/kubernetes/kubeadm.config + register: kubeadm_config + +### cluster not yet initialized + +- name: create new cluster + when: not kubeconfig_kubelet_stats.stat.exists + block: + + #### kubeadm wants token to come from --config if --config is used + #### i think this is stupid -> TODO: send bug report + # - name: generate bootstrap token for new cluster + # command: kubeadm token generate + # changed_when: False + # check_mode: no + # register: kubeadm_token_generate + + - name: initialize kubernetes master + command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" +# command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" + args: + creates: /etc/kubernetes/pki/ca.crt + register: kubeadm_init + + - name: dump output of kubeadm init to log file + when: kubeadm_init.changed + copy: + content: "{{ kubeadm_init.stdout }}\n" + dest: /etc/kubernetes/kubeadm-init.log + + - name: create bootstrap token for existing cluster + command: kubeadm token create --ttl 42m + check_mode: no + register: 
kubeadm_token_generate + +### cluster is already initialized but config has changed + +- name: upgrade cluster config + when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed + block: + + + - name: fail for cluster upgrades + fail: + msg: "upgrading cluster config is currently not supported!" + + +### cluster is already initialized + +- name: prepare cluster for new nodes + when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed + block: + + - name: fetch list of current nodes + command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name + changed_when: False + check_mode: no + register: kubectl_node_list + + - name: save list of current nodes + set_fact: + kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" + + - name: create bootstrap token for existing cluster + when: "groups['_kubernetes_nodes_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0" + command: kubeadm token create --ttl 42m + check_mode: no + register: kubeadm_token_create + +## + +## this fixes the kubelet kubeconfig to make use of certificate rotation. This is a bug in +## kubeadm init which was fixed with 1.17 release. TODO: remove this once all cluster have been +## upgraded to 1.17 or newer. +- name: fix kubeconfig of kubelet + lineinfile: + path: /etc/kubernetes/kubelet.conf + backrefs: yes + regexp: '^(\s*)client-{{ item }}(-data)?:' + line: '\1client-{{ item }}: /var/lib/kubelet/pki/kubelet-client-current.pem' + with_items: + - certificate + - key + notify: restart kubelet + + +- name: install openssl + apt: + name: openssl + state: present + +- name: get ca certificate digest + shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'" + args: + executable: /bin/bash + check_mode: no + register: kube_ca_openssl + changed_when: False + +- name: set variables needed by kubernetes/nodes to join the cluster + set_fact: + kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}" + kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}" + delegate_to: "{{ item }}" + delegate_facts: True + loop: "{{ groups['_kubernetes_nodes_'] }}" + +## Network Plugin + +# - name: install network plugin +# include_tasks: "net_{{ kubernetes_network_plugin }}.yml" diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml new file mode 100644 index 00000000..f7e25fb4 --- /dev/null +++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml @@ -0,0 +1,45 @@ +--- +- name: fetch secrets needed for secondary master + run_once: true + delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}" + block: + + - name: fetch list of current nodes + command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name + changed_when: False + check_mode: no + register: kubectl_node_list + + - name: save list of current nodes + set_fact: + kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" + + - name: upload certs + when: "groups['_kubernetes_masters_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0" + command: kubeadm init 
phase upload-certs --experimental-upload-certs + check_mode: no + register: kubeadm_upload_certs + + +- name: extracting encryption key for certs + set_fact: + kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" + +- name: join kubernetes secondary master node + command: "kubeadm join 127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} --apiserver-bind-port 6442{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --experimental-control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + args: + creates: /etc/kubernetes/kubelet.conf + register: kubeadm_join + +- name: dump output of kubeadm join to log file + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. + copy: # noqa 503 + content: "{{ kubeadm_join.stdout }}\n" + dest: /etc/kubernetes/kubeadm-join.log + + # TODO: acutally check if node has registered +- name: give the new master(s) a moment to register + when: kubeadm_join is changed + pause: # noqa 503 + seconds: 5 diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 new file mode 100644 index 00000000..a69ae84b --- /dev/null +++ b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 @@ -0,0 +1,13 @@ +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - secretbox: + keys: +{% for key in kubernetes.encryption_config_keys %} + - name: key{{ loop.index }} + secret: {{ key }} +{% endfor %} + - identity: {} diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 index 5ec18614..78e9d7a7 100644 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 @@ -1,34 +1,47 @@ {# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} +{# #} +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +{# TODO: this is ugly but we want to create our own token so we can #} +{# better control it's lifetime #} +bootstrapTokens: +- ttl: "1s" +--- apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration -kubernetesVersion: v{{ kubernetes_version }} +kubernetesVersion: {{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} -certificatesDir: /etc/kubernetes/pki +imageRepository: k8s.gcr.io {% if kubernetes.api_advertise_ip %} controlPlaneEndpoint: "{{ kubernetes.api_advertise_ip }}:6443" {% endif %} -imageRepository: k8s.gcr.io networking: dnsDomain: cluster.local podSubnet: {{ kubernetes.pod_ip_range }} serviceSubnet: {{ kubernetes.service_ip_range }} -etcd: - local: - dataDir: /var/lib/etcd apiServer: -{% if kubernetes.api_extra_sans | length > 0 %} + extraArgs: +{% if kubernetes.api_advertise_ip %} + advertise-address: {{ kubernetes.api_advertise_ip }} +{% endif %} + encryption-provider-config: /etc/kubernetes/encryption/config + extraVolumes: + - name: encryption-config + hostPath: /etc/kubernetes/encryption + mountPath: /etc/kubernetes/encryption + readOnly: true + pathType: Directory +{% if (kubernetes.api_extra_sans | length) == 0 %} + certSANs: [] +{% else %} certSANs: {% for san in 
kubernetes.api_extra_sans %} - {{ san }} {% endfor %} {% endif %} +controllerManager: extraArgs: -{% if kubernetes.api_advertise_ip %} - advertise-address: {{ kubernetes.api_advertise_ip }} -{% endif %} - authorization-mode: Node,RBAC - timeoutForControlPlane: 4m0s -controllerManager: {} + node-cidr-mask-size: "{{ kubernetes_network_node_cidr_size }}" scheduler: {} dns: type: CoreDNS -- cgit v1.2.3 From 7dbf0cae4e1a8d77e79b8aafd5bb08780977481f Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 11 Jan 2020 04:38:08 +0100 Subject: kubernetes: kubeadm/master node some more cleanup (WIP) --- common/kubernetes.yml | 8 ++-- inventory/group_vars/k8s-test/main.yml | 5 ++- roles/kubernetes/kubeadm/master/tasks/main.yml | 22 +++++----- .../kubeadm/master/tasks/secondary-masters.yml | 4 +- .../kubeadm/master/templates/encryption-config.j2 | 2 +- .../master/templates/kubeadm-cluster.config.j2 | 47 ---------------------- .../kubeadm/master/templates/kubeadm.config.j2 | 41 +++++++++++++++++++ roles/kubernetes/kubeadm/node/tasks/main.yml | 13 ++---- spreadspace/group_vars/k8s-test.yml | 10 +++++ 9 files changed, 77 insertions(+), 75 deletions(-) delete mode 100644 roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 create mode 100644 roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 create mode 100644 spreadspace/group_vars/k8s-test.yml (limited to 'roles/kubernetes/kubeadm/master') diff --git a/common/kubernetes.yml b/common/kubernetes.yml index 4a9cf65a..d1b4592a 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -82,10 +82,10 @@ - role: kubernetes/base - role: kubernetes/kubeadm/base -# - name: configure kubernetes primary master -# hosts: _kubernetes_primary_master_ -# roles: -# - role: kubernetes/kubeadm/master +- name: configure kubernetes primary master + hosts: _kubernetes_primary_master_ + roles: + - role: kubernetes/kubeadm/master # - name: configure kubernetes secondary masters # hosts: _kubernetes_masters_:!_kubernetes_primary_master_ diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index 979cc1a3..91b7b0c2 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -7,7 +7,6 @@ kubernetes: cluster_name: k8s-test dedicated_master: False - api_advertise_ip: 89.106.215.23 api_extra_sans: - k8s-test.spreadspace.org @@ -16,6 +15,10 @@ kubernetes: service_ip_range: 172.18.192.0/18 +# kubernetes_secrets: +# encryption_config_keys: "{{ vault_kubernetes_encryption_config_keys }}" + + kubeguard: kube_router_version: 0.4.0-rc1 diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml index 9ffdbeee..7f96ff6a 100644 --- a/roles/kubernetes/kubeadm/master/tasks/main.yml +++ b/roles/kubernetes/kubeadm/master/tasks/main.yml @@ -1,15 +1,15 @@ --- -- name: create direcotry for encryption config - file: - name: /etc/kubernetes/encryption - state: directory - mode: 0700 - -- name: install encryption config - template: - src: encryption-config.j2 - dest: /etc/kubernetes/encryption/config - mode: 0600 +# - name: create direcotry for encryption config +# file: +# name: /etc/kubernetes/encryption +# state: directory +# mode: 0700 + +# - name: install encryption config +# template: +# src: encryption-config.j2 +# dest: /etc/kubernetes/encryption/config +# mode: 0600 - name: install primary master diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml 
index f7e25fb4..fc85a37d 100644 --- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml @@ -16,7 +16,7 @@ - name: upload certs when: "groups['_kubernetes_masters_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0" - command: kubeadm init phase upload-certs --experimental-upload-certs + command: kubeadm init phase upload-certs --upload-certs check_mode: no register: kubeadm_upload_certs @@ -26,7 +26,7 @@ kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" - name: join kubernetes secondary master node - command: "kubeadm join 127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} --apiserver-bind-port 6442{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --experimental-control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 index a69ae84b..345c9bf9 100644 --- a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 @@ -6,7 +6,7 @@ resources: providers: - secretbox: keys: -{% for key in kubernetes.encryption_config_keys %} +{% for key in kubernetes_secrets.encryption_config_keys %} - name: key{{ loop.index }} secret: {{ key }} {% endfor %} diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 deleted file mode 100644 index 78e9d7a7..00000000 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 +++ /dev/null @@ -1,47 +0,0 @@ -{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} -{# #} -apiVersion: kubeadm.k8s.io/v1beta1 -kind: InitConfiguration -{# TODO: this is ugly but we want to create our own token so we can #} -{# better control it's lifetime #} -bootstrapTokens: -- ttl: "1s" ---- -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -kubernetesVersion: {{ kubernetes_version }} -clusterName: {{ kubernetes.cluster_name }} -imageRepository: k8s.gcr.io -{% if kubernetes.api_advertise_ip %} -controlPlaneEndpoint: "{{ kubernetes.api_advertise_ip }}:6443" -{% endif %} -networking: - dnsDomain: cluster.local - podSubnet: {{ kubernetes.pod_ip_range }} - serviceSubnet: {{ kubernetes.service_ip_range }} -apiServer: - extraArgs: -{% if kubernetes.api_advertise_ip %} - advertise-address: {{ kubernetes.api_advertise_ip }} -{% endif %} - encryption-provider-config: /etc/kubernetes/encryption/config - extraVolumes: - - name: encryption-config - hostPath: /etc/kubernetes/encryption - mountPath: /etc/kubernetes/encryption - readOnly: true - pathType: Directory -{% if (kubernetes.api_extra_sans | length) == 0 %} - certSANs: [] -{% else %} - 
certSANs: -{% for san in kubernetes.api_extra_sans %} - - {{ san }} -{% endfor %} -{% endif %} -controllerManager: - extraArgs: - node-cidr-mask-size: "{{ kubernetes_network_node_cidr_size }}" -scheduler: {} -dns: - type: CoreDNS diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 new file mode 100644 index 00000000..e03ea6f6 --- /dev/null +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 @@ -0,0 +1,41 @@ +{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} +{# #} +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +{# TODO: this is ugly but we want to create our own token so we can #} +{# better control it's lifetime #} +bootstrapTokens: +- ttl: "1s" +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +kubernetesVersion: {{ kubernetes_version }} +clusterName: {{ kubernetes.cluster_name }} +imageRepository: k8s.gcr.io +controlPlaneEndpoint: "{{ kubernetes_kubelet_node_ip }}:6443" +networking: + dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} + podSubnet: {{ kubernetes.pod_ip_range }} + serviceSubnet: {{ kubernetes.service_ip_range }} +apiServer: + extraArgs: + advertise-address: {{ kubernetes_kubelet_node_ip }} + # encryption-provider-config: /etc/kubernetes/encryption/config + # extraVolumes: + # - name: encryption-config + # hostPath: /etc/kubernetes/encryption + # mountPath: /etc/kubernetes/encryption + # readOnly: true + # pathType: Directory +{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %} + certSANs: [] +{% else %} + certSANs: + {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }} +{% endif %} +controllerManager: + extraArgs: + node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}" +scheduler: {} +dns: + type: CoreDNS diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml index 9f0057f9..2a140099 100644 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ b/roles/kubernetes/kubeadm/node/tasks/main.yml @@ -1,18 +1,13 @@ --- -- name: get master vars - set_fact: - kube_bootstrap_token: "{{ hostvars[kubernetes_master].kube_bootstrap_token }}" - kube_bootstrap_ca_cert_hash: "{{ hostvars[kubernetes_master].kube_bootstrap_ca_cert_hash }}" - kube_master_addr: "{{ kubernetes.api_advertise_ip | default(hostvars[kubernetes_master].ansible_default_ipv4.address) }}" - - name: join kubernetes node - command: "kubeadm join --token {{ kube_bootstrap_token }} {{ kube_master_addr }}:6443 --discovery-token-ca-cert-hash {{ kube_bootstrap_ca_cert_hash }}" + command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join - name: dump output of kubeadm join to log file - when: kubeadm_join.changed - copy: + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. 
+ copy: # noqa 503 content: "{{ kubeadm_join.stdout }}\n" dest: /etc/kubernetes/kubeadm-join.log diff --git a/spreadspace/group_vars/k8s-test.yml b/spreadspace/group_vars/k8s-test.yml new file mode 100644 index 00000000..389020c4 --- /dev/null +++ b/spreadspace/group_vars/k8s-test.yml @@ -0,0 +1,10 @@ +$ANSIBLE_VAULT;1.2;AES256;spreadspace +39376666393934306161383231356136393664373164653834393534623766323637666632313632 +3062623430363230333736643164393064346431346534650a393062613232663264383537396663 +39363838303361353766616264643139373062313437383332656162393536646262363561356264 +3333376139663332340a333036303333356333376630656632303464356261643731356336373337 +37303939363239613130363232646262353238333237633766613035643238356636323563636231 +66336562313963323536623732396534313131373338353136623461663033313534636561356131 +65373264636562336261316231656362333630656334373135633663666465376430303135383562 +33653663653132633834626165383832323235323563323334643830643934346466343762613433 +3463 -- cgit v1.2.3 From cd946c702fea849b06e0fd6a19ef5597235caf55 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 17 Jan 2020 17:46:08 +0100 Subject: single master kubernetes cluster works now --- common/kubernetes.yml | 18 +++++++++--------- inventory/group_vars/k8s-test-2019vm/main.yml | 4 ++-- inventory/group_vars/k8s-test-atlas/main.yml | 4 ++-- inventory/group_vars/k8s-test/main.yml | 8 +++++++- .../kubernetes/kubeadm/master/tasks/primary-master.yml | 17 +---------------- .../kubeadm/master/templates/kubeadm.config.j2 | 4 ++-- roles/kubernetes/kubeadm/node/tasks/main.yml | 2 +- spreadspace/k8s-test.yml | 12 ++++++------ 8 files changed, 30 insertions(+), 39 deletions(-) (limited to 'roles/kubernetes/kubeadm/master') diff --git a/common/kubernetes.yml b/common/kubernetes.yml index c4f3f81e..aaf23219 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -45,14 +45,14 @@ roles: - role: kubernetes/kubeadm/master -# - name: configure kubernetes secondary masters -# hosts: _kubernetes_masters_:!_kubernetes_primary_master_ -# roles: -# - role: kubernetes/kubeadm/master - -# - name: configure kubernetes non-master nodes -# hosts: _kubernetes_nodes_:!_kubernetes_masters_ -# roles: -# - role: kubernetes/kubeadm/node +- name: configure kubernetes secondary masters + hosts: _kubernetes_masters_:!_kubernetes_primary_master_ + roles: + - role: kubernetes/kubeadm/master + +- name: configure kubernetes non-master nodes + hosts: _kubernetes_nodes_:!_kubernetes_masters_ + roles: + - role: kubernetes/kubeadm/node ### TODO: add node labels (ie. 
for ingress daeomnset) diff --git a/inventory/group_vars/k8s-test-2019vm/main.yml b/inventory/group_vars/k8s-test-2019vm/main.yml index 2cbe5be1..4c08a1bb 100644 --- a/inventory/group_vars/k8s-test-2019vm/main.yml +++ b/inventory/group_vars/k8s-test-2019vm/main.yml @@ -4,7 +4,7 @@ vm_host: sk-2019vm install: host: "{{ vm_host }}" mem: 1024 - numcpu: 1 + numcpu: 2 disks: primary: /dev/sda scsi: @@ -12,7 +12,7 @@ install: type: zfs pool: storage name: "{{ inventory_hostname }}" - size: 5g + size: 10g interfaces: - bridge: br-public name: primary0 diff --git a/inventory/group_vars/k8s-test-atlas/main.yml b/inventory/group_vars/k8s-test-atlas/main.yml index 4212cf5e..9838513d 100644 --- a/inventory/group_vars/k8s-test-atlas/main.yml +++ b/inventory/group_vars/k8s-test-atlas/main.yml @@ -6,7 +6,7 @@ vm_host: ch-atlas install: host: "{{ vm_host }}" mem: 1024 - numcpu: 1 + numcpu: 2 disks: primary: /dev/sda scsi: @@ -14,7 +14,7 @@ install: type: lvm vg: "{{ hostvars[vm_host].host_name }}" lv: "{{ inventory_hostname }}" - size: 5g + size: 10g interfaces: - bridge: br-public name: primary0 diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index e1b6570f..0d4d0857 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -1,5 +1,11 @@ --- -kubernetes_version: 1.16.4 +containerd_lvm: + vg: "{{ host_name }}" + lv: containerd + size: 4G + fs: ext4 + +kubernetes_version: 1.17.1 kubernetes_container_runtime: containerd kubernetes_network_plugin: kubeguard diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml index 58658794..5efc91b5 100644 --- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml +++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml @@ -53,7 +53,6 @@ fail: msg: "upgrading cluster config is currently not supported!" - ### cluster is already initialized - name: prepare cluster for new nodes @@ -76,22 +75,8 @@ check_mode: no register: kubeadm_token_create -## - -## this fixes the kubelet kubeconfig to make use of certificate rotation. This is a bug in -## kubeadm init which was fixed with 1.17 release. TODO: remove this once all cluster have been -## upgraded to 1.17 or newer. 
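For context on the workaround removed in this hunk: before the 1.17 release, kubeadm init wrote /etc/kubernetes/kubelet.conf with the client certificate embedded as client-certificate-data/client-key-data, so the task below rewrote the file to point at the automatically rotated certificate instead. From 1.17 on, kubeadm itself emits a kubelet.conf whose user entry references the rotating certificate, roughly like the following sketch (the two paths are exactly what the removed lineinfile task enforced; the user name shown is an assumption):

users:
- name: default-auth
  user:
    # kubelet rotates this file on its own, no kubeconfig rewrite needed
    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem
    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem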
-- name: fix kubeconfig of kubelet - lineinfile: - path: /etc/kubernetes/kubelet.conf - backrefs: yes - regexp: '^(\s*)client-{{ item }}(-data)?:' - line: '\1client-{{ item }}: /var/lib/kubelet/pki/kubelet-client-current.pem' - with_items: - - certificate - - key - notify: restart kubelet +## calculate certificate digest - name: install openssl apt: diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 index e03ea6f6..3c10e59b 100644 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 @@ -1,13 +1,13 @@ {# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} {# #} -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: InitConfiguration {# TODO: this is ugly but we want to create our own token so we can #} {# better control it's lifetime #} bootstrapTokens: - ttl: "1s" --- -apiVersion: kubeadm.k8s.io/v1beta1 +apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: {{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml index 2a140099..dba2ce30 100644 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ b/roles/kubernetes/kubeadm/node/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: join kubernetes node - command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" + command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml index 27599556..ed56cb78 100644 --- a/spreadspace/k8s-test.yml +++ b/spreadspace/k8s-test.yml @@ -1,10 +1,10 @@ --- -- name: Basic Node Setup - hosts: k8s-test - roles: - - role: base - - role: sshd - - role: zsh +# - name: Basic Node Setup +# hosts: k8s-test +# roles: +# - role: base +# - role: sshd +# - role: zsh - import_playbook: ../common/kubernetes.yml vars: -- cgit v1.2.3 From bb9a03e136bd8d1029bfb2c1cf0be22d28df1576 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 17 Jan 2020 19:53:12 +0100 Subject: kubernetes: node cleanup works now --- common/kubernetes-cleanup.yml | 7 ++----- common/kubernetes.yml | 2 -- roles/kubernetes/kubeadm/master/tasks/main.yml | 6 +++--- roles/kubernetes/kubeadm/master/tasks/primary-master.yml | 4 ++-- roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml | 4 ++-- roles/kubernetes/kubeadm/node/tasks/main.yml | 2 +- roles/kubernetes/kubeadm/reset/tasks/main.yml | 10 ++++++++++ spreadspace/k8s-test.yml | 5 ++++- 8 files changed, 24 insertions(+), 16 deletions(-) (limited to 'roles/kubernetes/kubeadm/master') diff --git a/common/kubernetes-cleanup.yml b/common/kubernetes-cleanup.yml index a320e0f8..be55d11e 100644 --- a/common/kubernetes-cleanup.yml +++ b/common/kubernetes-cleanup.yml @@ -1,7 +1,4 @@ --- -- import_playbook: kubernetes-cluster-layout.yml - -######## - 
name: check for nodes to be removed hosts: _kubernetes_primary_master_ tasks: @@ -15,7 +12,7 @@ loop: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list | difference(groups['_kubernetes_nodes_']) }}" add_host: name: "{{ item }}" - inventory_dir: "{{ inventory_dir }}" + inventory_dir: "{{ hostvars[item].inventory_dir }}" group: _kubernetes_nodes_remove_ changed_when: False @@ -28,7 +25,7 @@ roles: - role: kubernetes/kubeadm/reset - role: kubernetes/net/kubeguard - when: kubernetes_network_plugin == 'kubeguard' + when: hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_network_plugin == 'kubeguard' vars: kubeguard_action: remove diff --git a/common/kubernetes.yml b/common/kubernetes.yml index aaf23219..4fc8cef2 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -1,6 +1,4 @@ --- -- import_playbook: kubernetes-cluster-layout.yml - - name: prepare variables and do some sanity checks hosts: _kubernetes_nodes_ gather_facts: no diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml index 7f96ff6a..9af041b2 100644 --- a/roles/kubernetes/kubeadm/master/tasks/main.yml +++ b/roles/kubernetes/kubeadm/master/tasks/main.yml @@ -22,7 +22,7 @@ - name: check if master is tainted (1/2) - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ host_name }} -o json" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json" check_mode: no register: kubectl_get_node changed_when: False @@ -33,11 +33,11 @@ - name: remove taint from master node when: not kubernetes.dedicated_master and 'node-role.kubernetes.io/master' in kube_node_taints - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ host_name }} node-role.kubernetes.io/master-" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-" - name: add taint for master node when: kubernetes.dedicated_master and 'node-role.kubernetes.io/master' not in kube_node_taints - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ host_name }} node-role.kubernetes.io/master='':NoSchedule" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule" - name: prepare kubectl (1/2) diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml index 5efc91b5..e814e847 100644 --- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml +++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml @@ -25,7 +25,7 @@ # register: kubeadm_token_generate - name: initialize kubernetes master - command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" + command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} 
--skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" args: creates: /etc/kubernetes/pki/ca.crt @@ -70,7 +70,7 @@ kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" - name: create bootstrap token for existing cluster - when: "groups['_kubernetes_nodes_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0" + when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0" command: kubeadm token create --ttl 42m check_mode: no register: kubeadm_token_create diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml index fc85a37d..7025ace0 100644 --- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml @@ -15,7 +15,7 @@ kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" - name: upload certs - when: "groups['_kubernetes_masters_'] | map('extract', hostvars) | map(attribute='host_name') | difference(kubernetes_current_nodes) | length > 0" + when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0" command: kubeadm init phase upload-certs --upload-certs check_mode: no register: kubeadm_upload_certs @@ -26,7 +26,7 @@ kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" - name: join kubernetes secondary master node - command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml index dba2ce30..f7efdd81 100644 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ b/roles/kubernetes/kubeadm/node/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: join kubernetes node - command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" + command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join diff --git 
a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml index a6d64c7d..f0e88e53 100644 --- a/roles/kubernetes/kubeadm/reset/tasks/main.yml +++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml @@ -1,3 +1,13 @@ --- - name: clean up settings and files created by kubeadm command: kubeadm reset -f + +- name: clean up extra configs and logs + loop: + - /etc/kubernetes/kubeadm.config + - /etc/kubernetes/kubeadm-init.log + - /etc/kubernetes/kubeadm-join.log + - /etc/kubernetes/pki + file: + path: "{{ item }}" + state: absent diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml index ed56cb78..97daa5b0 100644 --- a/spreadspace/k8s-test.yml +++ b/spreadspace/k8s-test.yml @@ -6,9 +6,12 @@ # - role: sshd # - role: zsh -- import_playbook: ../common/kubernetes.yml +- import_playbook: ../common/kubernetes-cluster-layout.yml vars: kubernetes_cluster_layout: nodes_group: k8s-test masters: - s2-k8s-test0 + +- import_playbook: ../common/kubernetes.yml +- import_playbook: ../common/kubernetes-cleanup.yml -- cgit v1.2.3 From 8010f57a73885f7abb5c98c1f77c49baa59a7d16 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 17 Jan 2020 22:24:09 +0100 Subject: kubernetes: multi master cluster works now --- inventory/group_vars/k8s-test/main.yml | 3 +-- .../kubeadm/master/tasks/primary-master.yml | 30 ++++++++++++---------- .../kubeadm/master/tasks/secondary-masters.yml | 27 ++++++++++--------- .../kubeadm/master/templates/kubeadm.config.j2 | 11 +++++--- roles/kubernetes/kubeadm/node/tasks/main.yml | 25 ++++++++++-------- .../kubeguard/templates/kubeguard-peer.service.j2 | 3 ++- spreadspace/k8s-test.yml | 3 +++ 7 files changed, 60 insertions(+), 42 deletions(-) (limited to 'roles/kubernetes/kubeadm/master') diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index 0d4d0857..b5863ad1 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -14,6 +14,7 @@ kubernetes: dedicated_master: False api_extra_sans: + - 89.106.215.23 - k8s-test.spreadspace.org pod_ip_range: 172.18.0.0/16 @@ -25,8 +26,6 @@ kubernetes: kubeguard: - kube_router_version: 0.4.0-rc1 - ## node_index must be in the range between 1 and 190 -> 189 hosts possible ## ## hardcoded hostnames are not nice but if we do this via host_vars diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml index e814e847..115c8616 100644 --- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml +++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml @@ -24,35 +24,39 @@ # check_mode: no # register: kubeadm_token_generate - - name: initialize kubernetes master - command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" -# command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" - args: - creates: /etc/kubernetes/pki/ca.crt - register: kubeadm_init - - - name: dump output of kubeadm init to log file - when: kubeadm_init.changed - copy: - 
content: "{{ kubeadm_init.stdout }}\n" - dest: /etc/kubernetes/kubeadm-init.log + - name: initialize kubernetes master and store log + block: + - name: initialize kubernetes master + command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" + # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" + args: + creates: /etc/kubernetes/pki/ca.crt + register: kubeadm_init + + always: + - name: dump output of kubeadm init to log file + when: kubeadm_init.changed + copy: + content: "{{ kubeadm_init.stdout }}\n" + dest: /etc/kubernetes/kubeadm-init.log - name: create bootstrap token for existing cluster command: kubeadm token create --ttl 42m check_mode: no register: kubeadm_token_generate + ### cluster is already initialized but config has changed - name: upgrade cluster config when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed block: - - name: fail for cluster upgrades fail: msg: "upgrading cluster config is currently not supported!" + ### cluster is already initialized - name: prepare cluster for new nodes diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml index 7025ace0..ffe1b4b2 100644 --- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml @@ -25,18 +25,21 @@ set_fact: kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" -- name: join kubernetes secondary master node - command: "kubeadm join {{ host_vars[groups['_kubernetes_primary_master_']].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" - args: - creates: /etc/kubernetes/kubelet.conf - register: kubeadm_join - -- name: dump output of kubeadm join to log file - when: kubeadm_join is changed - # This is not a handler by design to make sure this action runs at this point of the play. 
- copy: # noqa 503 - content: "{{ kubeadm_join.stdout }}\n" - dest: /etc/kubernetes/kubeadm-join.log +- name: join kubernetes secondary master node and store log + block: + - name: join kubernetes secondary master node + command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_kubelet_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_kubelet_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + args: + creates: /etc/kubernetes/kubelet.conf + register: kubeadm_join + + always: + - name: dump output of kubeadm join to log file + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. + copy: # noqa 503 + content: "{{ kubeadm_join.stdout }}\n" + dest: /etc/kubernetes/kubeadm-join.log # TODO: acutally check if node has registered - name: give the new master(s) a moment to register diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 index 3c10e59b..869c809f 100644 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 @@ -1,4 +1,4 @@ -{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #} +{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #} {# #} apiVersion: kubeadm.k8s.io/v1beta2 kind: InitConfiguration @@ -6,20 +6,25 @@ kind: InitConfiguration {# better control it's lifetime #} bootstrapTokens: - ttl: "1s" +{% if kubernetes_kubelet_node_ip is defined %} +localAPIEndpoint: + advertiseAddress: {{ kubernetes_kubelet_node_ip }} +{% endif %} --- apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration kubernetesVersion: {{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} imageRepository: k8s.gcr.io +{% if kubernetes_kubelet_node_ip is defined %} controlPlaneEndpoint: "{{ kubernetes_kubelet_node_ip }}:6443" +{% endif %} networking: dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} podSubnet: {{ kubernetes.pod_ip_range }} serviceSubnet: {{ kubernetes.service_ip_range }} apiServer: - extraArgs: - advertise-address: {{ kubernetes_kubelet_node_ip }} + #extraArgs: # encryption-provider-config: /etc/kubernetes/encryption/config # extraVolumes: # - name: encryption-config diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml index f7efdd81..61d47111 100644 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ b/roles/kubernetes/kubeadm/node/tasks/main.yml @@ -1,13 +1,16 @@ --- -- name: join kubernetes node - command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" - args: - creates: /etc/kubernetes/kubelet.conf - register: kubeadm_join +- name: join kubernetes node and store log + block: + - name: join kubernetes node + command: "kubeadm join {{ 
hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" + args: + creates: /etc/kubernetes/kubelet.conf + register: kubeadm_join -- name: dump output of kubeadm join to log file - when: kubeadm_join is changed - # This is not a handler by design to make sure this action runs at this point of the play. - copy: # noqa 503 - content: "{{ kubeadm_join.stdout }}\n" - dest: /etc/kubernetes/kubeadm-join.log + always: + - name: dump output of kubeadm join to log file + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. + copy: # noqa 503 + content: "{{ kubeadm_join.stdout }}\n" + dest: /etc/kubernetes/kubeadm-join.log diff --git a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 index 6f36b571..9ca444e8 100644 --- a/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 +++ b/roles/kubernetes/net/kubeguard/templates/kubeguard-peer.service.j2 @@ -4,6 +4,7 @@ After=network.target Requires=kubeguard-interfaces.service After=kubeguard-interfaces.service +{% set pod_ip_self = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') -%} {% set pod_net_peer = kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[peer]) -%} {% set direct_zone = kubeguard.direct_net_zones | direct_net_zone(inventory_hostname, peer) -%} {% if direct_zone %} @@ -22,7 +23,7 @@ Type=oneshot {% if direct_zone %} ExecStart=/sbin/ip addr add {{ direct_ip }} dev {{ direct_interface }} ExecStart=/sbin/ip link set up dev {{ direct_interface }} -ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} +ExecStart=/sbin/ip route add {{ pod_net_peer }} via {{ direct_ip_peer | ipaddr('address') }} src {{ pod_ip_self }} ExecStop=/sbin/ip route del {{ pod_net_peer }} ExecStop=/sbin/ip link set down dev {{ direct_interface }} ExecStop=/sbin/ip addr del {{ direct_ip }} dev {{ direct_interface }} diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml index 97daa5b0..f21b3fae 100644 --- a/spreadspace/k8s-test.yml +++ b/spreadspace/k8s-test.yml @@ -12,6 +12,9 @@ nodes_group: k8s-test masters: - s2-k8s-test0 + - s2-k8s-test1 + - s2-k8s-test2 + primary_master: s2-k8s-test0 - import_playbook: ../common/kubernetes.yml - import_playbook: ../common/kubernetes-cleanup.yml -- cgit v1.2.3 From b64058268b377cc78057b8ba8d3190e520d33053 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 17 Jan 2020 22:42:27 +0100 Subject: kubernetes: kubernetes_overlay_node_ip --- common/kubernetes.yml | 5 +++++ inventory/group_vars/k8s-test/main.yml | 5 ++--- roles/kubernetes/kubeadm/base/tasks/main.yml | 4 ++-- roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml | 2 +- roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 | 8 ++++---- roles/kubernetes/kubeadm/node/tasks/main.yml | 2 +- 6 files changed, 15 insertions(+), 11 deletions(-) (limited to 'roles/kubernetes/kubeadm/master') diff --git a/common/kubernetes.yml b/common/kubernetes.yml index 4fc8cef2..d5b58767 100644 --- a/common/kubernetes.yml +++ b/common/kubernetes.yml @@ -22,6 +22,11 @@ msg: "At least one 
node_index is < 1 (indizes start at 1)" that: (kubeguard.node_index.values() | min) > 0 + - name: check whether overlay node io is configured > 0 + assert: + msg: "For kubeguard to work you need to configure kubernetes_overlay_node_ip" + that: kubernetes_overlay_node_ip is defined + - name: make sure the kubernetes_cri_socket variable is configured correctly when: kubernetes_container_runtime == 'containerd' assert: diff --git a/inventory/group_vars/k8s-test/main.yml b/inventory/group_vars/k8s-test/main.yml index b5863ad1..60d381ec 100644 --- a/inventory/group_vars/k8s-test/main.yml +++ b/inventory/group_vars/k8s-test/main.yml @@ -8,6 +8,7 @@ containerd_lvm: kubernetes_version: 1.17.1 kubernetes_container_runtime: containerd kubernetes_network_plugin: kubeguard +kubernetes_cri_socket: "unix:///run/containerd/containerd.sock" kubernetes: cluster_name: k8s-test @@ -45,6 +46,4 @@ kubeguard: s2-k8s-test0: direct0 s2-k8s-test1: direct0 - -kubernetes_kubelet_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}" -kubernetes_cri_socket: "unix:///run/containerd/containerd.sock" +kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}" diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml index 37944915..69a09811 100644 --- a/roles/kubernetes/kubeadm/base/tasks/main.yml +++ b/roles/kubernetes/kubeadm/base/tasks/main.yml @@ -16,11 +16,11 @@ selection: hold - name: set kubelet node-ip - when: kubernetes_kubelet_node_ip is defined + when: kubernetes_overlay_node_ip is defined lineinfile: name: "/etc/default/kubelet" regexp: '^KUBELET_EXTRA_ARGS=' - line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_kubelet_node_ip }}' + line: 'KUBELET_EXTRA_ARGS=--node-ip={{ kubernetes_overlay_node_ip }}' create: yes - name: add kubeadm completion for shells diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml index ffe1b4b2..3c800a87 100644 --- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml @@ -28,7 +28,7 @@ - name: join kubernetes secondary master node and store log block: - name: join kubernetes secondary master node - command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_kubelet_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_kubelet_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_overlay_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" args: creates: 
/etc/kubernetes/kubelet.conf register: kubeadm_join diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 index 869c809f..06d59ced 100644 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 @@ -6,9 +6,9 @@ kind: InitConfiguration {# better control it's lifetime #} bootstrapTokens: - ttl: "1s" -{% if kubernetes_kubelet_node_ip is defined %} +{% if kubernetes_overlay_node_ip is defined %} localAPIEndpoint: - advertiseAddress: {{ kubernetes_kubelet_node_ip }} + advertiseAddress: {{ kubernetes_overlay_node_ip }} {% endif %} --- apiVersion: kubeadm.k8s.io/v1beta2 @@ -16,8 +16,8 @@ kind: ClusterConfiguration kubernetesVersion: {{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} imageRepository: k8s.gcr.io -{% if kubernetes_kubelet_node_ip is defined %} -controlPlaneEndpoint: "{{ kubernetes_kubelet_node_ip }}:6443" +{% if kubernetes_overlay_node_ip is defined %} +controlPlaneEndpoint: "{{ kubernetes_overlay_node_ip }}:6443" {% endif %} networking: dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml index 61d47111..e4fff98b 100644 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ b/roles/kubernetes/kubeadm/node/tasks/main.yml @@ -2,7 +2,7 @@ - name: join kubernetes node and store log block: - name: join kubernetes node - command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_kubelet_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" + command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_overlay_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join -- cgit v1.2.3 From db478d9fde9bc89b4ab3bca7ffc7540794e85f10 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 17 Jan 2020 23:24:10 +0100 Subject: kubernetes: added haproxy for load balancing api servers --- roles/kubernetes/kubeadm/base/tasks/main.yml | 25 +++++++++++++++ .../kubeadm/base/templates/haproxy.cfg.j2 | 36 ++++++++++++++++++++++ .../kubeadm/master/tasks/secondary-masters.yml | 2 +- .../kubeadm/master/templates/kubeadm.config.j2 | 9 +++--- roles/kubernetes/kubeadm/node/tasks/main.yml | 2 +- spreadspace/k8s-test.yml | 15 ++++----- 6 files changed, 73 insertions(+), 16 deletions(-) create mode 100644 roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 (limited to 'roles/kubernetes/kubeadm/master') diff --git a/roles/kubernetes/kubeadm/base/tasks/main.yml b/roles/kubernetes/kubeadm/base/tasks/main.yml index 69a09811..8e913560 100644 --- a/roles/kubernetes/kubeadm/base/tasks/main.yml +++ b/roles/kubernetes/kubeadm/base/tasks/main.yml @@ -2,6 +2,8 @@ - name: install kubeadm and kubectl apt: name: + - haproxy + - hatop - "kubeadm{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}" - "kubectl{% if kubernetes.pkg_version is defined %}={{ kubernetes.pkg_version }}{% endif %}" state: present 
@@ -33,3 +35,26 @@ marker: "### {mark} ANSIBLE MANAGED BLOCK for kubeadm ###" content: | source <(kubeadm completion {{ item }}) + +- name: configure haproxy + template: + src: haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg + register: haproxy_config + +- name: (re)start haproxy + systemd: + name: haproxy + state: "{% if haproxy_config is changed %}restarted{% else %}started{% endif %}" + enabled: yes + +- name: add hatop config for shells + loop: + - zsh + - bash + blockinfile: + path: "/root/.{{ item }}rc" + create: yes + marker: "### {mark} ANSIBLE MANAGED BLOCK for hatop ###" + content: | + alias hatop="hatop -s /var/run/haproxy/admin.sock" diff --git a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 new file mode 100644 index 00000000..3de6ac00 --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 @@ -0,0 +1,36 @@ +global + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners + stats timeout 30s + user haproxy + group haproxy + daemon + +frontend kube_api +{% if '_kubernetes_masters_' in group_names %} + bind *:6443 +{% else %} + bind 127.0.0.1:6443 +{% endif %} + mode tcp + timeout client 3h + default_backend kube_api + +backend kube_api + mode tcp +{% if '_kubernetes_masters_' in group_names %} + balance first +{% else %} + balance roundrobin +{% endif %} + option log-health-checks + option httpchk GET /healthz + http-check expect string ok + default-server inter 5s fall 3 rise 2 + timeout connect 5s + timeout server 3h +{% for master in groups['_kubernetes_masters_'] %} + server {{ hostvars[master].inventory_hostname }} {{ hostvars[master].kubernetes_overlay_node_ip | default(hostvars[master].ansible_default_ipv4.address) }}:6442 {% if master == inventory_hostname %}id 1{% endif %} check check-ssl verify none +{% endfor %} diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml index 3c800a87..c00c3203 100644 --- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml @@ -28,7 +28,7 @@ - name: join kubernetes secondary master node and store log block: - name: join kubernetes secondary master node - command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_overlay_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 
b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 index 06d59ced..f48a34f3 100644 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 +++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 @@ -6,8 +6,9 @@ kind: InitConfiguration {# better control it's lifetime #} bootstrapTokens: - ttl: "1s" -{% if kubernetes_overlay_node_ip is defined %} localAPIEndpoint: + bindPort: 6442 +{% if kubernetes_overlay_node_ip is defined %} advertiseAddress: {{ kubernetes_overlay_node_ip }} {% endif %} --- @@ -16,15 +17,13 @@ kind: ClusterConfiguration kubernetesVersion: {{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} imageRepository: k8s.gcr.io -{% if kubernetes_overlay_node_ip is defined %} -controlPlaneEndpoint: "{{ kubernetes_overlay_node_ip }}:6443" -{% endif %} +controlPlaneEndpoint: 127.0.0.1:6443 networking: dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} podSubnet: {{ kubernetes.pod_ip_range }} serviceSubnet: {{ kubernetes.service_ip_range }} apiServer: - #extraArgs: + # extraArgs: # encryption-provider-config: /etc/kubernetes/encryption/config # extraVolumes: # - name: encryption-config diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml index e4fff98b..1d5178ea 100644 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ b/roles/kubernetes/kubeadm/node/tasks/main.yml @@ -2,7 +2,7 @@ - name: join kubernetes node and store log block: - name: join kubernetes node - command: "kubeadm join {{ hostvars[groups['_kubernetes_primary_master_'][0]].kubernetes_overlay_node_ip }}:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" + command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" args: creates: /etc/kubernetes/kubelet.conf register: kubeadm_join diff --git a/spreadspace/k8s-test.yml b/spreadspace/k8s-test.yml index f21b3fae..b94f8301 100644 --- a/spreadspace/k8s-test.yml +++ b/spreadspace/k8s-test.yml @@ -1,10 +1,10 @@ --- -# - name: Basic Node Setup -# hosts: k8s-test -# roles: -# - role: base -# - role: sshd -# - role: zsh +- name: Basic Node Setup + hosts: k8s-test + roles: + - role: base + - role: sshd + - role: zsh - import_playbook: ../common/kubernetes-cluster-layout.yml vars: @@ -12,9 +12,6 @@ nodes_group: k8s-test masters: - s2-k8s-test0 - - s2-k8s-test1 - - s2-k8s-test2 - primary_master: s2-k8s-test0 - import_playbook: ../common/kubernetes.yml - import_playbook: ../common/kubernetes-cleanup.yml -- cgit v1.2.3
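
With the haproxy change above, every node reaches the Kubernetes API through a local haproxy instance on port 6443 (bound to all interfaces on masters, loopback only elsewhere), which forwards to the apiservers now listening on port 6442; masters prefer their own apiserver via "balance first" and "id 1". A minimal Ansible sketch for checking that local endpoint, assuming anonymous access to /healthz as already relied on by the haproxy health checks; the task name and placement are illustrative only and not part of any role in this series:

    - name: check local kube-apiserver endpoint behind haproxy
      uri:
        url: https://127.0.0.1:6443/healthz
        validate_certs: no    # apiserver cert carries cluster-internal names only, same reason the haproxy checks use 'verify none'
        return_content: yes
      register: kube_api_healthz
      failed_when: kube_api_healthz.content != 'ok'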