blob: c9092bf34c7481be3b2ef609e6dfae1e000909ab (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
|
---
# Probe for /etc/kubernetes/admin.conf: kubeadm writes this file during
# "kubeadm init", so its presence tells us whether this host already runs
# an initialized control plane. The result gates the two mutually
# exclusive blocks below (fresh init vs. token refresh for new nodes).
- name: check if kubeconfig admin.conf already exists
  stat:
    path: /etc/kubernetes/admin.conf
  register: kubeconfig_admin_stats
### cluster not yet initialized
- name: create new cluster
  # Run only when admin.conf is absent, i.e. kubeadm init has never run here.
  when: not kubeconfig_admin_stats.stat.exists
  block:
    # Produce a bootstrap token string locally; this command only prints a
    # token and is therefore never reported as a change and is safe in
    # check mode.
    - name: generate bootstrap token for new cluster
      command: kubeadm token generate
      changed_when: false
      check_mode: false
      register: kubeadm_token_generate

    # Initialize the control plane. "creates" makes this idempotent: once
    # the CA certificate exists, kubeadm init is skipped.
    # api_extra_sans is guarded with default([]) so the task no longer
    # fails when the variable is left undefined.
    - name: set up kubernetes master
      command: >-
        kubeadm init
        --pod-network-cidr {{ kubernetes.pod_ip_range }}
        --service-cidr {{ kubernetes.service_ip_range }}
        --apiserver-advertise-address {{ kubernetes.api_advertise_ip | default('0.0.0.0') }}
        {% if kubernetes.api_extra_sans | default([]) | length > 0 %}--apiserver-cert-extra-sans {{ kubernetes.api_extra_sans | join(',') }}{% endif %}
        --token '{{ kubeadm_token_generate.stdout }}'
        --token-ttl 42m
        --skip-token-print
      args:
        creates: /etc/kubernetes/pki/ca.crt
      register: kubeadm_init

    # Keep the init output (join instructions, warnings) for later reference,
    # but only when init actually ran.
    - name: dump output of kubeadm init to log file
      when: kubeadm_init is changed
      copy:
        content: "{{ kubeadm_init.stdout }}\n"
        dest: /etc/kubernetes/kubeadm-init.log
### cluster is already initialized
- name: prepare cluster for new nodes
  # Run only when admin.conf exists, i.e. the control plane is already up.
  when: kubeconfig_admin_stats.stat.exists
  block:
    # Read-only query; --kubeconfig is passed explicitly for consistency
    # with the other kubectl tasks in this file instead of relying on
    # root's default kubeconfig.
    - name: fetch list of current nodes
      command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
      changed_when: false
      check_mode: false
      register: kubectl_node_list

    # "kubectl get nodes -o name" prints "nodes/<hostname>" per line;
    # strip the prefix to get bare node names.
    - name: save list of current nodes
      set_fact:
        kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'nodes/', '') | list }}"

    # Only mint a fresh (42-minute) bootstrap token when at least one
    # desired node is not yet part of the cluster.
    - name: create bootstrap token for existing cluster
      when: kubernetes_nodes | difference(kubernetes_current_nodes) | length > 0
      command: kubeadm token create --ttl 42m
      check_mode: false
      register: kubeadm_token_create
##
# Read-only query of this node's object so we can inspect its taints.
# NOTE(review): this task looks the node up by inventory_hostname while the
# "add taint" task below targets ansible_nodename — confirm the two always
# match in this inventory.
- name: check if master is tainted (1/2)
  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
  check_mode: false
  register: kubectl_get_node
  changed_when: false

# Extract the taint keys from the node JSON. When the node has no taints
# (spec.taints undefined) this renders an empty string, which still works
# with the "in" membership tests below.
- name: check if master is tainted (2/2)
  set_fact:
    kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
# When the master should also schedule workloads, drop the master taint
# (trailing "-" removes the taint).
- name: remove taint from master node
  when: "not kubernetes.dedicated_master and 'node-role.kubernetes.io/master' in kube_node_taints"
  command: kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-

# When the master is dedicated to control-plane duty, (re-)apply the
# NoSchedule master taint if it is currently missing.
- name: add taint for master node
  when: "kubernetes.dedicated_master and 'node-role.kubernetes.io/master' not in kube_node_taints"
  command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ ansible_nodename }} node-role.kubernetes.io/master='':NoSchedule"
# openssl is needed to derive the CA public-key digest used by
# "kubeadm join --discovery-token-ca-cert-hash".
- name: install openssl
  apt:
    name: openssl
    state: present

# Compute the sha256 digest of the cluster CA public key. pipefail makes
# the task fail loudly if any stage of the pipeline fails (e.g. ca.crt
# missing) instead of returning an empty digest; bash is guaranteed here
# since this is an apt-based host. Read-only, so never reported as changed.
- name: get ca certificate digest
  shell: |
    set -o pipefail
    openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  args:
    executable: /bin/bash
  check_mode: false
  register: kube_ca_openssl
  changed_when: false
# Export the facts node plays use for "kubeadm join". Exactly one of the
# two registered token results exists per run: kubeadm_token_generate on a
# fresh init, kubeadm_token_create when refreshing an existing cluster
# (and neither when no new nodes are pending, yielding an empty token).
- name: set variables needed by kubernetes/nodes to join the cluster
  set_fact:
    kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
    kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
# Give root a working kubectl config by symlinking ~/.kube/config to the
# admin kubeconfig. Param names normalized to the file module's canonical
# "path" (the original mixed the "name" and "dest" aliases).
- name: prepare kubectl (1/2)
  file:
    path: /root/.kube
    state: directory

- name: prepare kubectl (2/2)
  file:
    path: /root/.kube/config
    src: /etc/kubernetes/admin.conf
    state: link
|