summaryrefslogtreecommitdiff
path: root/roles/kubernetes/kubeadm/master
diff options
context:
space:
mode:
Diffstat (limited to 'roles/kubernetes/kubeadm/master')
-rw-r--r--roles/kubernetes/kubeadm/master/tasks/main.yml108
-rw-r--r--roles/kubernetes/kubeadm/master/tasks/primary-master.yml109
-rw-r--r--roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml48
-rw-r--r--roles/kubernetes/kubeadm/master/templates/encryption-config.j213
-rw-r--r--roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j234
-rw-r--r--roles/kubernetes/kubeadm/master/templates/kubeadm.config.j245
6 files changed, 246 insertions, 111 deletions
diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml
index 7cc6fe94..9af041b2 100644
--- a/roles/kubernetes/kubeadm/master/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/main.yml
@@ -1,67 +1,25 @@
---
-- name: check if kubeconfig admin.conf already exists
- stat:
- path: /etc/kubernetes/admin.conf
- register: kubeconfig_admin_stats
+# - name: create directory for encryption config
+# file:
+# name: /etc/kubernetes/encryption
+# state: directory
+# mode: 0700
-### cluster not yet initialized
+# - name: install encryption config
+# template:
+# src: encryption-config.j2
+# dest: /etc/kubernetes/encryption/config
+# mode: 0600
-- name: create new cluster
- when: kubeconfig_admin_stats.stat.exists == False
- block:
- - name: generate bootstrap token for new cluster
- command: kubeadm token generate
- changed_when: False
- check_mode: no
- register: kubeadm_token_generate
+- name: install primary master
+ include_tasks: primary-master.yml
+ when: "'_kubernetes_primary_master_' in group_names"
- - name: create kubernetes config directory
- file:
- path: /etc/kubernetes
- state: directory
+- name: install secondary masters
+ include_tasks: secondary-masters.yml
+ when: "'_kubernetes_primary_master_' not in group_names"
- ## TODO test whether the generated cluster configs really works - since it has never been used...
- - name: install cluster config for kubeadm
- template:
- src: kubeadm-cluster.config.j2
- dest: /etc/kubernetes/kubeadm-cluster.config
-
- - name: set up kubernetes master
- command: "kubeadm init --config '/etc/kubernetes/kubeadm-cluster.config' --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
- args:
- creates: /etc/kubernetes/pki/ca.crt
- register: kubeadm_init
-
- - name: dump output of kubeadm init to log file
- when: kubeadm_init.changed
- copy:
- content: "{{ kubeadm_init.stdout }}\n"
- dest: /etc/kubernetes/kubeadm-init.log
-
-### cluster is already initialized
-
-- name: prepare cluster for new nodes
- when: kubeconfig_admin_stats.stat.exists == True
- block:
-
- - name: fetch list of current nodes
- command: kubectl get nodes -o name
- changed_when: False
- check_mode: no
- register: kubectl_node_list
-
- - name: save list of current nodes
- set_fact:
- kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
-
- - name: create bootstrap token for existing cluster
- when: kubernetes_nodes | difference(kubernetes_current_nodes) | length > 0
- command: kubeadm token create --ttl 42m
- check_mode: no
- register: kubeadm_token_create
-
-##
- name: check if master is tainted (1/2)
command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
@@ -74,28 +32,13 @@
kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
- name: remove taint from master node
- when: "kubernetes.dedicated_master == False and 'node-role.kubernetes.io/master' in kube_node_taints"
- command: kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-
+ when: not kubernetes.dedicated_master and 'node-role.kubernetes.io/master' in kube_node_taints
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
- name: add taint for master node
- when: "kubernetes.dedicated_master == True and 'node-role.kubernetes.io/master' not in kube_node_taints"
- command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ ansible_nodename }} node-role.kubernetes.io/master='':NoSchedule"
-
-- name: install openssl
- apt:
- name: openssl
- state: present
+ when: kubernetes.dedicated_master and 'node-role.kubernetes.io/master' not in kube_node_taints
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
-- name: get ca certificate digest
- shell: "openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'"
- check_mode: no
- register: kube_ca_openssl
- changed_when: False
-
-- name: set variables needed by kubernetes/nodes to join the cluster
- set_fact:
- kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
- kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
- name: prepare kubectl (1/2)
file:
@@ -107,3 +50,14 @@
dest: /root/.kube/config
src: /etc/kubernetes/admin.conf
state: link
+
+- name: add kubectl completion config for shells
+ with_items:
+ - zsh
+ - bash
+ blockinfile:
+ path: "/root/.{{ item }}rc"
+ create: yes
+ marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###"
+ content: |
+ source <(kubectl completion {{ item }})
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
new file mode 100644
index 00000000..115c8616
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -0,0 +1,109 @@
+---
+- name: check if kubeconfig kubelet.conf already exists
+ stat:
+ path: /etc/kubernetes/kubelet.conf
+ register: kubeconfig_kubelet_stats
+
+- name: generate kubeadm.config
+ template:
+ src: kubeadm.config.j2
+ dest: /etc/kubernetes/kubeadm.config
+ register: kubeadm_config
+
+### cluster not yet initialized
+
+- name: create new cluster
+ when: not kubeconfig_kubelet_stats.stat.exists
+ block:
+
+ #### kubeadm wants token to come from --config if --config is used
+ #### i think this is stupid -> TODO: send bug report
+ # - name: generate bootstrap token for new cluster
+ # command: kubeadm token generate
+ # changed_when: False
+ # check_mode: no
+ # register: kubeadm_token_generate
+
+ - name: initialize kubernetes master and store log
+ block:
+ - name: initialize kubernetes master
+ command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+ # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+ args:
+ creates: /etc/kubernetes/pki/ca.crt
+ register: kubeadm_init
+
+ always:
+ - name: dump output of kubeadm init to log file
+ when: kubeadm_init.changed
+ copy:
+ content: "{{ kubeadm_init.stdout }}\n"
+ dest: /etc/kubernetes/kubeadm-init.log
+
+ - name: create bootstrap token for existing cluster
+ command: kubeadm token create --ttl 42m
+ check_mode: no
+ register: kubeadm_token_generate
+
+
+### cluster is already initialized but config has changed
+
+- name: upgrade cluster config
+ when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed
+ block:
+
+ - name: fail for cluster upgrades
+ fail:
+ msg: "upgrading cluster config is currently not supported!"
+
+
+### cluster is already initialized
+
+- name: prepare cluster for new nodes
+ when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed
+ block:
+
+ - name: fetch list of current nodes
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+ changed_when: False
+ check_mode: no
+ register: kubectl_node_list
+
+ - name: save list of current nodes
+ set_fact:
+ kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+ - name: create bootstrap token for existing cluster
+ when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
+ command: kubeadm token create --ttl 42m
+ check_mode: no
+ register: kubeadm_token_create
+
+
+## calculate certificate digest
+
+- name: install openssl
+ apt:
+ name: openssl
+ state: present
+
+- name: get ca certificate digest
+ shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'"
+ args:
+ executable: /bin/bash
+ check_mode: no
+ register: kube_ca_openssl
+ changed_when: False
+
+- name: set variables needed by kubernetes/nodes to join the cluster
+ set_fact:
+ kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
+ kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ loop: "{{ groups['_kubernetes_nodes_'] }}"
+
+## Network Plugin
+
+# - name: install network plugin
+# include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
new file mode 100644
index 00000000..c00c3203
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -0,0 +1,48 @@
+---
+- name: fetch secrets needed for secondary master
+ run_once: true
+ delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}"
+ block:
+
+ - name: fetch list of current nodes
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+ changed_when: False
+ check_mode: no
+ register: kubectl_node_list
+
+ - name: save list of current nodes
+ set_fact:
+ kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+ - name: upload certs
+ when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0"
+ command: kubeadm init phase upload-certs --upload-certs
+ check_mode: no
+ register: kubeadm_upload_certs
+
+
+- name: extracting encryption key for certs
+ set_fact:
+ kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
+
+- name: join kubernetes secondary master node and store log
+ block:
+ - name: join kubernetes secondary master node
+ command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+ args:
+ creates: /etc/kubernetes/kubelet.conf
+ register: kubeadm_join
+
+ always:
+ - name: dump output of kubeadm join to log file
+ when: kubeadm_join is changed
+ # This is not a handler by design to make sure this action runs at this point of the play.
+ copy: # noqa 503
+ content: "{{ kubeadm_join.stdout }}\n"
+ dest: /etc/kubernetes/kubeadm-join.log
+
+  # TODO: actually check if node has registered
+- name: give the new master(s) a moment to register
+ when: kubeadm_join is changed
+ pause: # noqa 503
+ seconds: 5
diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
new file mode 100644
index 00000000..345c9bf9
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2
@@ -0,0 +1,13 @@
+kind: EncryptionConfiguration
+apiVersion: apiserver.config.k8s.io/v1
+resources:
+ - resources:
+ - secrets
+ providers:
+ - secretbox:
+ keys:
+{% for key in kubernetes_secrets.encryption_config_keys %}
+ - name: key{{ loop.index }}
+ secret: {{ key }}
+{% endfor %}
+ - identity: {}
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2
deleted file mode 100644
index 07c4dddd..00000000
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm-cluster.config.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 #}
-apiVersion: kubeadm.k8s.io/v1beta1
-kind: ClusterConfiguration
-kubernetesVersion: v{{ kubernetes.version }}
-clusterName: {{ kubernetes.cluster_name }}
-certificatesDir: /etc/kubernetes/pki
-{% if kubernetes.api_advertise_ip %}
-controlPlaneEndpoint: "{{ kubernetes.api_advertise_ip }}:6443"
-{% endif %}
-imageRepository: k8s.gcr.io
-networking:
- dnsDomain: cluster.local
- podSubnet: {{ kubernetes.pod_ip_range }}
- serviceSubnet: {{ kubernetes.service_ip_range }}
-etcd:
- local:
- dataDir: /var/lib/etcd
-apiServer:
-{% if kubernetes.api_extra_sans | length > 0 %}
- certSANs:
-{% for san in kubernetes.api_extra_sans %}
- - {{ san }}
-{% endfor %}
-{% endif %}
- extraArgs:
-{% if kubernetes.api_advertise_ip %}
- advertise-address: {{ kubernetes.api_advertise_ip }}
-{% endif %}
- authorization-mode: Node,RBAC
- timeoutForControlPlane: 4m0s
-controllerManager: {}
-scheduler: {}
-dns:
- type: CoreDNS
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
new file mode 100644
index 00000000..f48a34f3
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -0,0 +1,45 @@
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
+{# #}
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+{# TODO: this is ugly but we want to create our own token so we can #}
+{# better control its lifetime #}
+bootstrapTokens:
+- ttl: "1s"
+localAPIEndpoint:
+ bindPort: 6442
+{% if kubernetes_overlay_node_ip is defined %}
+ advertiseAddress: {{ kubernetes_overlay_node_ip }}
+{% endif %}
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+kubernetesVersion: {{ kubernetes_version }}
+clusterName: {{ kubernetes.cluster_name }}
+imageRepository: k8s.gcr.io
+controlPlaneEndpoint: 127.0.0.1:6443
+networking:
+ dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }}
+ podSubnet: {{ kubernetes.pod_ip_range }}
+ serviceSubnet: {{ kubernetes.service_ip_range }}
+apiServer:
+ # extraArgs:
+ # encryption-provider-config: /etc/kubernetes/encryption/config
+ # extraVolumes:
+ # - name: encryption-config
+ # hostPath: /etc/kubernetes/encryption
+ # mountPath: /etc/kubernetes/encryption
+ # readOnly: true
+ # pathType: Directory
+{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %}
+ certSANs: []
+{% else %}
+ certSANs:
+ {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }}
+{% endif %}
+controllerManager:
+ extraArgs:
+ node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}"
+scheduler: {}
+dns:
+ type: CoreDNS