From c09b07327b688a6a47f523a15c1a5c29d4f476d0 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sat, 7 May 2022 22:45:49 +0200 Subject: k8s: rename masters to control-plane nodes --- .../kubeadm/base/templates/haproxy.cfg.j2 | 8 +- .../kubeadm/control-plane/tasks/main.yml | 76 +++++++ .../control-plane/tasks/net_kube-router.yml | 11 + .../kubeadm/control-plane/tasks/net_kubeguard.yml | 14 ++ .../kubeadm/control-plane/tasks/net_none.yml | 2 + .../kubeadm/control-plane/tasks/primary.yml | 131 ++++++++++++ .../kubeadm/control-plane/tasks/secondary.yml | 55 +++++ .../control-plane/templates/encryption-config.j2 | 13 ++ .../control-plane/templates/kubeadm.config.j2 | 53 +++++ .../templates/net_kube-router/config.0.4.0.yml.j2 | 235 ++++++++++++++++++++ .../templates/net_kube-router/config.1.1.1.yml.j2 | 236 +++++++++++++++++++++ .../templates/net_kube-router/config.1.4.0.yml.j2 | 236 +++++++++++++++++++++ .../net_kubeguard/kube-router.0.4.0.yml.j2 | 170 +++++++++++++++ .../net_kubeguard/kube-router.1.1.1.yml.j2 | 170 +++++++++++++++ .../control-plane/templates/node-local-dns.yml.j2 | 211 ++++++++++++++++++ roles/kubernetes/kubeadm/master/tasks/main.yml | 77 ------- .../kubeadm/master/tasks/net_kube-router.yml | 11 - .../kubeadm/master/tasks/net_kubeguard.yml | 14 -- roles/kubernetes/kubeadm/master/tasks/net_none.yml | 2 - .../kubeadm/master/tasks/primary-master.yml | 131 ------------ .../kubeadm/master/tasks/secondary-masters.yml | 55 ----- .../kubeadm/master/templates/encryption-config.j2 | 13 -- .../kubeadm/master/templates/kubeadm.config.j2 | 53 ----- .../templates/net_kube-router/config.0.4.0.yml.j2 | 235 -------------------- .../templates/net_kube-router/config.1.1.1.yml.j2 | 236 --------------------- .../templates/net_kube-router/config.1.4.0.yml.j2 | 236 --------------------- .../net_kubeguard/kube-router.0.4.0.yml.j2 | 170 --------------- .../net_kubeguard/kube-router.1.1.1.yml.j2 | 170 --------------- .../kubeadm/master/templates/node-local-dns.yml.j2 | 211 ------------------ roles/kubernetes/kubeadm/node/tasks/main.yml | 22 -- roles/kubernetes/kubeadm/prune/tasks/main.yml | 2 +- roles/kubernetes/kubeadm/upgrade | 12 +- roles/kubernetes/kubeadm/worker/tasks/main.yml | 22 ++ 33 files changed, 1646 insertions(+), 1647 deletions(-) create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/main.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/primary.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 create mode 100644 roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 delete mode 
100644 roles/kubernetes/kubeadm/master/tasks/main.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/net_none.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/primary-master.yml delete mode 100644 roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml delete mode 100644 roles/kubernetes/kubeadm/master/templates/encryption-config.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 delete mode 100644 roles/kubernetes/kubeadm/node/tasks/main.yml create mode 100644 roles/kubernetes/kubeadm/worker/tasks/main.yml (limited to 'roles/kubernetes/kubeadm') diff --git a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 index 2e0eaf5d..19118b2e 100644 --- a/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 +++ b/roles/kubernetes/kubeadm/base/templates/haproxy.cfg.j2 @@ -16,7 +16,7 @@ defaults option dontlog-normal frontend kube_api -{% if '_kubernetes_masters_' in group_names %} +{% if '_kubernetes_controlplane_nodes_' in group_names %} bind *:6443 {% else %} bind 127.0.0.1:6443 @@ -25,7 +25,7 @@ frontend kube_api default_backend kube_api backend kube_api -{% if '_kubernetes_masters_' in group_names %} +{% if '_kubernetes_controlplane_nodes_' in group_names %} balance first {% else %} balance roundrobin @@ -36,6 +36,6 @@ backend kube_api default-server inter 5s fall 3 rise 2 timeout connect 5s timeout server 3h -{% for master in groups['_kubernetes_masters_'] %} - server {{ master }} {{ hostvars[master].kubernetes_overlay_node_ip | default(hostvars[master].ansible_default_ipv4.address) }}:6442 {% if master == inventory_hostname %}id 1{% endif %} check check-ssl verify none +{% for node in groups['_kubernetes_controlplane_nodes_'] %} + server {{ node }} {{ hostvars[node].kubernetes_overlay_node_ip | default(hostvars[node].ansible_default_ipv4.address) }}:6442 {% if node == inventory_hostname %}id 1{% endif %} check check-ssl verify none {% endfor %} diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/main.yml b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml new file mode 100644 index 00000000..d5bd378e --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml @@ -0,0 +1,76 @@ +--- +- name: create directory for encryption config + file: + name: /etc/kubernetes/encryption + state: directory + mode: 0700 + +- name: install encryption config + template: + src: encryption-config.j2 + dest: /etc/kubernetes/encryption/config + mode: 0600 + + +- name: install primary control-plane node + include_tasks: primary.yml + when: "'_kubernetes_primary_controlplane_node_' in group_names" + +- name: install secondary control-plane nodes + include_tasks: secondary.yml + when: "'_kubernetes_primary_controlplane_node_' not in group_names" + + +- name: check if control-plane node is tainted (1/2) + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json" + check_mode: no + register: kubectl_get_node + changed_when: False + +- name: check if control-plane node is tainted (2/2) + set_fact: + kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}" + +- name: remove taint from control-plane node + when: not kubernetes.dedicated_controlplane_nodes + block: + - name: remove control-plane taint from node + when: "'node-role.kubernetes.io/control-plane' in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane-" + + - name: remove deprecated master taint from node + when: "'node-role.kubernetes.io/master' in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-" + +- name: add taint to control-plane node + when: kubernetes.dedicated_controlplane_nodes + block: + - name: add control-plane taint to node + when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule" + + - name: add deprecated master taint to node + when: "'node-role.kubernetes.io/master' not in kube_node_taints" + command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule" + +- name: prepare kubectl (1/2) + file: + name: /root/.kube + state: directory + +- name: prepare kubectl (2/2) + file: + dest: /root/.kube/config + src: /etc/kubernetes/admin.conf + state: link + +- name: add kubectl completion config for shells + loop: + - zsh + - bash + blockinfile: + path: "/root/.{{ item }}rc" + create: yes + marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###" + content: | + source <(kubectl completion {{ item }}) diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml new file mode 100644 index 00000000..0a216414 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml @@ -0,0 +1,11 @@ +--- +- name: generate kube-router configuration + template: + src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2" + dest: /etc/kubernetes/network-plugin.yml + + ## TODO: move to server-side apply (GA since 1.22) +- name: install kube-router onto the cluster + command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml + register: kube_router_apply_result + changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml new file mode 100644 index 00000000..a572ca89 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml @@ -0,0 +1,14 @@ +--- +- name: install kube-router variant + when: "kubernetes_network_plugin_variant == 'with-kube-router'" + block: + - name: generate kubeguard (kube-router) configuration + template: + src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2" + dest:
/etc/kubernetes/network-plugin.yml + + ## TODO: move to server-side apply (GA since 1.22) + - name: install kubeguard (kube-router) onto the cluster + command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml + register: kubeguard_apply_result + changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml new file mode 100644 index 00000000..bf1a16d5 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml @@ -0,0 +1,2 @@ +--- +## this "plugin" is for testing purposes only diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml new file mode 100644 index 00000000..22a5af42 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml @@ -0,0 +1,131 @@ +--- +- name: check if kubeconfig kubelet.conf already exists + stat: + path: /etc/kubernetes/kubelet.conf + register: kubeconfig_kubelet_stats + + ## TODO: switch to kubeadm config version v1beta3 (available since 1.22) +- name: generate kubeadm.config + template: + src: kubeadm.config.j2 + dest: /etc/kubernetes/kubeadm.config + register: kubeadm_config + +### cluster not yet initialized + +- name: create new cluster + when: not kubeconfig_kubelet_stats.stat.exists + block: + + #### kubeadm wants token to come from --config if --config is used + #### I think this is stupid -> TODO: send bug report + # - name: generate bootstrap token for new cluster + # command: kubeadm token generate + # changed_when: False + # check_mode: no + # register: kubeadm_token_generate + + - name: initialize kubernetes primary control-plane node and store log + block: + - name: initialize kubernetes primary control-plane node + command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" + # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" + args: + creates: /etc/kubernetes/pki/ca.crt + register: kubeadm_init + + always: + - name: dump output of kubeadm init to log file + when: kubeadm_init.changed + copy: + content: "{{ kubeadm_init.stdout }}\n" + dest: /etc/kubernetes/kubeadm-init.log + + - name: dump error output of kubeadm init to log file + when: kubeadm_init.changed and kubeadm_init.stderr + copy: + content: "{{ kubeadm_init.stderr }}\n" + dest: /etc/kubernetes/kubeadm-init.errors + + - name: create bootstrap token for existing cluster + command: kubeadm token create --ttl 42m + check_mode: no + register: kubeadm_token_generate + + +### cluster is already initialized but config has changed + +- name: upgrade cluster config + when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed + block: + + - name: fail for cluster upgrades + fail: + msg: "upgrading cluster config is currently not supported!"
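[Note: the hard fail above means config drift is detected but never reconciled. A minimal sketch of what a real drift check could look like, not part of this commit: kubeadm stores the live ClusterConfiguration in the kube-system/kubeadm-config ConfigMap, so key settings can be compared against the role's variables before failing. Task names and the choice of compared keys below are illustrative assumptions.

- name: fetch live ClusterConfiguration from the kubeadm-config ConfigMap   # sketch only
  command: >-
    kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system
    get configmap kubeadm-config -o jsonpath='{.data.ClusterConfiguration}'
  check_mode: no
  changed_when: False
  register: kubeadm_live_config

- name: verify that immutable cluster settings did not drift   # sketch only
  assert:
    that:
      - (kubeadm_live_config.stdout | from_yaml).networking.podSubnet == kubernetes.pod_ip_range
      - (kubeadm_live_config.stdout | from_yaml).networking.serviceSubnet == kubernetes.service_ip_range
    fail_msg: "pod/service subnets cannot be changed on an initialized cluster"

End of note.]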
+ + +### cluster is already initialized + +- name: prepare cluster for new nodes + when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed + block: + + - name: fetch list of current nodes + command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name + changed_when: False + check_mode: no + register: kubectl_node_list + + - name: save list of current nodes + set_fact: + kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" + + - name: create bootstrap token for existing cluster + when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0" + command: kubeadm token create --ttl 42m + check_mode: no + register: kubeadm_token_create + + +## calculate certificate digest + +- name: install openssl + apt: + name: openssl + state: present + +- name: get ca certificate digest + shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'" + args: + executable: /bin/bash + check_mode: no + register: kube_ca_openssl + changed_when: False + +- name: set variables needed by kubernetes/nodes to join the cluster + set_fact: + kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}" + kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}" + delegate_to: "{{ item }}" + delegate_facts: True + loop: "{{ groups['_kubernetes_nodes_'] }}" + + +## install node-local-dns + +- name: generate node-local dns cache config + template: + src: node-local-dns.yml.j2 + dest: /etc/kubernetes/node-local-dns.yml + + ## TODO: move to server-side apply (GA since 1.22) +- name: install node-local dns cache + command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml + register: kube_node_local_dns_apply_result + changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 + + +## Network Plugin + +- name: install network plugin + include_tasks: "net_{{ kubernetes_network_plugin }}.yml" diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml new file mode 100644 index 00000000..a2dbe081 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml @@ -0,0 +1,55 @@ +--- +- name: fetch secrets needed for secondary control-plane node + run_once: true + delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}" + block: + + - name: fetch list of current nodes + command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name + changed_when: False + check_mode: no + register: kubectl_node_list + + - name: save list of current nodes + set_fact: + kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" + + - name: upload certs + when: "groups['_kubernetes_controlplane_nodes_'] | difference(kubernetes_current_nodes) | length > 0" + command: kubeadm init phase upload-certs --upload-certs + check_mode: no + register: kubeadm_upload_certs + + +- name: extracting encryption key for certs + set_fact: + kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" + +- name: join kubernetes secondary control-plane node and store log + block: 
+ - name: join kubernetes secondary control-plane node + throttle: 1 + command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" + args: + creates: /etc/kubernetes/kubelet.conf + register: kubeadm_join + + always: + - name: dump output of kubeadm join to log file + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. + copy: # noqa 503 + content: "{{ kubeadm_join.stdout }}\n" + dest: /etc/kubernetes/kubeadm-join.log + + - name: dump error output of kubeadm join to log file + when: kubeadm_join.changed and kubeadm_join.stderr + copy: + content: "{{ kubeadm_join.stderr }}\n" + dest: /etc/kubernetes/kubeadm-join.errors + + # TODO: actually check if node has registered +- name: give the new control-plane node(s) a moment to register + when: kubeadm_join is changed + pause: # noqa 503 + seconds: 5 diff --git a/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 new file mode 100644 index 00000000..345c9bf9 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 @@ -0,0 +1,13 @@ +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - secretbox: + keys: +{% for key in kubernetes_secrets.encryption_config_keys %} + - name: key{{ loop.index }} + secret: {{ key }} +{% endfor %} + - identity: {} diff --git a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 new file mode 100644 index 00000000..2fa98ed6 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 @@ -0,0 +1,53 @@ +{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #} +{# #} +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +{# TODO: this is ugly but we want to create our own token so we can #} +{# better control its lifetime #} +bootstrapTokens: +- ttl: "1s" +localAPIEndpoint: + bindPort: 6442 +{% if kubernetes_overlay_node_ip is defined %} + advertiseAddress: {{ kubernetes_overlay_node_ip }} +{% endif %} +nodeRegistration: + criSocket: {{ kubernetes_cri_socket }} +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +kubernetesVersion: {{ kubernetes_version }} +clusterName: {{ kubernetes.cluster_name }} +imageRepository: k8s.gcr.io +controlPlaneEndpoint: 127.0.0.1:6443 +networking: + dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} + podSubnet: {{ kubernetes.pod_ip_range }} + serviceSubnet: {{ kubernetes.service_ip_range }} +apiServer: + extraArgs: + encryption-provider-config: /etc/kubernetes/encryption/config + extraVolumes: + - name: encryption-config + hostPath: /etc/kubernetes/encryption + mountPath: /etc/kubernetes/encryption + readOnly: true + pathType: Directory +{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %} + certSANs: [] +{% else %} + certSANs: + {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }} +{% endif %} +controllerManager: + extraArgs: + node-cidr-mask-size: "{{
kubernetes.pod_ip_range_size }}" +scheduler: {} +dns: + type: CoreDNS +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +clusterDNS: +- {{ kubernetes_nodelocal_dnscache_ip }} +cgroupDriver: systemd diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 new file mode 100644 index 00000000..a2660db2 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 @@ -0,0 +1,235 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-cfg + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + cni-conf.json: | + { + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, + "hairpinMode": true, + "ipam":{ + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "snat":true, + "portMappings":true + } + } + ] + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --run-router=true + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + - --routes-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + initContainers: + - name: install-cni + image: busybox + imagePullPolicy: Always + command: + - /bin/sh + - -c + - set -e -x; + if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then + if [ -f /etc/cni/net.d/*.conf ]; then + rm -f /etc/cni/net.d/*.conf; + fi; + TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; + cp /etc/kube-router/cni-conf.json ${TMP}; + mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; + fi + volumeMounts: + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kube-router-cfg + mountPath: /etc/kube-router + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kube-router-cfg + configMap: + name: kube-router-cfg + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 new file mode 100644 index 00000000..382164cb --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 @@ -0,0 +1,236 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-cfg + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + cni-conf.json: | + { + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, + "hairpinMode": true, + "ipam":{ + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "snat":true, + "portMappings":true + } + } + ] + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + 
prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --run-router=true + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --bgp-graceful-restart=true + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + - --routes-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + initContainers: + - name: install-cni + image: busybox + imagePullPolicy: Always + command: + - /bin/sh + - -c + - set -e -x; + if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then + if [ -f /etc/cni/net.d/*.conf ]; then + rm -f /etc/cni/net.d/*.conf; + fi; + TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; + cp /etc/kube-router/cni-conf.json ${TMP}; + mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; + fi + volumeMounts: + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kube-router-cfg + mountPath: /etc/kube-router + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kube-router-cfg + configMap: + name: kube-router-cfg + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 new file mode 100644 index 00000000..382164cb --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 @@ -0,0 
+1,236 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-cfg + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + cni-conf.json: | + { + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, + "hairpinMode": true, + "ipam":{ + "type":"host-local" + } + }, + { + "type":"portmap", + "capabilities":{ + "snat":true, + "portMappings":true + } + } + ] + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --run-router=true + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --bgp-graceful-restart=true + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + - --routes-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + initContainers: + - name: install-cni + image: busybox + imagePullPolicy: Always + command: + - /bin/sh + - -c + - set -e -x; + if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then + if [ -f /etc/cni/net.d/*.conf ]; then + rm -f /etc/cni/net.d/*.conf; + fi; + TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; + cp /etc/kube-router/cni-conf.json ${TMP}; + mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; + fi + volumeMounts: + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kube-router-cfg + mountPath: /etc/kube-router + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: cni-conf-dir + hostPath: + path: /etc/cni/net.d + - name: kube-router-cfg + configMap: + name: kube-router-cfg + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 new file mode 100644 index 00000000..e343f4a7 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 @@ -0,0 +1,170 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + imagePullPolicy: Always + args: + - --cluster-cidr={{ kubernetes.pod_ip_range }} + - --run-router=false + - --run-firewall=true + - --run-service-proxy={{ 
kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 new file mode 100644 index 00000000..ec30d670 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 @@ -0,0 +1,170 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-kubeconfig + namespace: kube-system + labels: + tier: node + k8s-app: kube-router +data: + kubeconfig.conf: | + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-router + tier: node + name: kube-router + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-router + tier: node + template: + metadata: + labels: + k8s-app: kube-router + tier: node + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "8080" + spec: + priorityClassName: system-node-critical + serviceAccountName: kube-router + serviceAccount: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} + 
imagePullPolicy: Always + args: + - --run-router=false + - --run-firewall=true + - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} + - --bgp-graceful-restart=true + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --hairpin-mode + - --iptables-sync-period=10s + - --ipvs-sync-period=10s + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 + resources: + requests: + cpu: 250m + memory: 250Mi + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: kubeconfig + mountPath: /var/lib/kube-router + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + - name: kubeconfig + configMap: + name: kube-router-kubeconfig + items: + - key: kubeconfig.conf + path: kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: +- kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 new file mode 100644 index 00000000..d536d5a7 --- /dev/null +++ b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 @@ -0,0 +1,211 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-local-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns-upstream + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNSUpstream" +spec: + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + selector: + k8s-app: kube-dns +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: node-local-dns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile +data: + Corefile: | + {{ kubernetes.dns_domain | default('cluster.local') }}:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop + bind {{ kubernetes_nodelocal_dnscache_ip }} + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + health {{ kubernetes_nodelocal_dnscache_ip }}:8080 + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop + bind {{ kubernetes_nodelocal_dnscache_ip }} + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + } + ip6.arpa:53 { + errors + cache 30 + reload + loop + bind {{ kubernetes_nodelocal_dnscache_ip }} + forward . __PILLAR__CLUSTER__DNS__ { + force_tcp + } + prometheus :9253 + } + .:53 { + errors + cache 30 + reload + loop + bind {{ kubernetes_nodelocal_dnscache_ip }} + forward . __PILLAR__UPSTREAM__SERVERS__ { + force_tcp + } + prometheus :9253 + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-local-dns + namespace: kube-system + labels: + k8s-app: node-local-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + selector: + matchLabels: + k8s-app: node-local-dns + template: + metadata: + labels: + k8s-app: node-local-dns + annotations: + prometheus.io/port: "9253" + prometheus.io/scrape: "true" + spec: + priorityClassName: system-node-critical + serviceAccountName: node-local-dns + hostNetwork: true + dnsPolicy: Default # Don't use cluster DNS. 
+ tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + containers: + - name: node-cache + image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0 + resources: + requests: + cpu: 25m + memory: 5Mi + args: [ "-localip", "{{ kubernetes_nodelocal_dnscache_ip }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ] + securityContext: + privileged: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9253 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + host: {{ kubernetes_nodelocal_dnscache_ip }} + path: /health + port: 8080 + initialDelaySeconds: 60 + timeoutSeconds: 5 + volumeMounts: + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - name: config-volume + mountPath: /etc/coredns + - name: kube-dns-config + mountPath: /etc/kube-dns + volumes: + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + - name: config-volume + configMap: + name: node-local-dns + items: + - key: Corefile + path: Corefile.base +--- +# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods. +# We use this to expose metrics to Prometheus. +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9253" + prometheus.io/scrape: "true" + labels: + k8s-app: node-local-dns + name: node-local-dns + namespace: kube-system +spec: + clusterIP: None + ports: + - name: metrics + port: 9253 + targetPort: 9253 + selector: + k8s-app: node-local-dns diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml deleted file mode 100644 index 04df760f..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/main.yml +++ /dev/null @@ -1,77 +0,0 @@ ---- -- name: create direcotry for encryption config - file: - name: /etc/kubernetes/encryption - state: directory - mode: 0700 - -- name: install encryption config - template: - src: encryption-config.j2 - dest: /etc/kubernetes/encryption/config - mode: 0600 - - -- name: install primary master - include_tasks: primary-master.yml - when: "'_kubernetes_primary_master_' in group_names" - -- name: install secondary masters - include_tasks: secondary-masters.yml - when: "'_kubernetes_primary_master_' not in group_names" - - -- name: check if master is tainted (1/2) - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json" - check_mode: no - register: kubectl_get_node - changed_when: False - -- name: check if master is tainted (2/2) - set_fact: - kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}" - -- name: remove taint from master/control-plane node - when: not kubernetes.dedicated_master - block: - - name: remove master taint from node - when: "'node-role.kubernetes.io/master' in kube_node_taints" - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-" - - - name: remove control-plane taint from node - when: "'node-role.kubernetes.io/control-plane' in kube_node_taints" - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} 
node-role.kubernetes.io/control-plane-" - -- name: add taint from master/control-plane node - when: kubernetes.dedicated_master - block: - - name: add master taint from node - when: "'node-role.kubernetes.io/master' not in kube_node_taints" - command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule" - - ## TODO: enable this once all needed addons and workloads have tolerations set accordingly - # - name: add control-plane taint from node - # when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints" - # command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule" - -- name: prepare kubectl (1/2) - file: - name: /root/.kube - state: directory - -- name: prepare kubectl (2/2) - file: - dest: /root/.kube/config - src: /etc/kubernetes/admin.conf - state: link - -- name: add kubectl completion config for shells - loop: - - zsh - - bash - blockinfile: - path: "/root/.{{ item }}rc" - create: yes - marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###" - content: | - source <(kubectl completion {{ item }}) diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml deleted file mode 100644 index 0a216414..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/net_kube-router.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: generate kube-router configuration - template: - src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2" - dest: /etc/kubernetes/network-plugin.yml - - ## TODO: move to server-side apply (GA since 1.22) -- name: install kube-router on to the cluster - command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml - register: kube_router_apply_result - changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml deleted file mode 100644 index a572ca89..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/net_kubeguard.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: install kube-router variant - when: "kubernetes_network_plugin_variant == 'with-kube-router'" - block: - - name: generate kubeguard (kube-router) configuration - template: - src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2" - dest: /etc/kubernetes/network-plugin.yml - - ## TODO: move to server-side apply (GA since 1.22) - - name: install kubeguard (kube-router) on to the cluster - command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml - register: kubeguard_apply_result - changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/master/tasks/net_none.yml b/roles/kubernetes/kubeadm/master/tasks/net_none.yml deleted file mode 100644 index bf1a16d5..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/net_none.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -## this "plugin" is for testing purposes only diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml deleted file mode 100644 index 6fb63d09..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml +++ /dev/null @@ -1,131 +0,0 @@ ---- -- name: check if 
kubeconfig kubelet.conf already exists - stat: - path: /etc/kubernetes/kubelet.conf - register: kubeconfig_kubelet_stats - - ## TODO: switch to kubeadm config version v1beta3 (available since 1.22) -- name: generate kubeadm.config - template: - src: kubeadm.config.j2 - dest: /etc/kubernetes/kubeadm.config - register: kubeadm_config - -### cluster not yet initialized - -- name: create new cluster - when: not kubeconfig_kubelet_stats.stat.exists - block: - - #### kubeadm wants token to come from --config if --config is used - #### i think this is stupid -> TODO: send bug report - # - name: generate bootstrap token for new cluster - # command: kubeadm token generate - # changed_when: False - # check_mode: no - # register: kubeadm_token_generate - - - name: initialize kubernetes master and store log - block: - - name: initialize kubernetes master - command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" - # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" - args: - creates: /etc/kubernetes/pki/ca.crt - register: kubeadm_init - - always: - - name: dump output of kubeadm init to log file - when: kubeadm_init.changed - copy: - content: "{{ kubeadm_init.stdout }}\n" - dest: /etc/kubernetes/kubeadm-init.log - - - name: dump error output of kubeadm init to log file - when: kubeadm_init.changed and kubeadm_init.stderr - copy: - content: "{{ kubeadm_init.stderr }}\n" - dest: /etc/kubernetes/kubeadm-init.errors - - - name: create bootstrap token for existing cluster - command: kubeadm token create --ttl 42m - check_mode: no - register: kubeadm_token_generate - - -### cluster is already initialized but config has changed - -- name: upgrade cluster config - when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed - block: - - - name: fail for cluster upgrades - fail: - msg: "upgrading cluster config is currently not supported!" 
- - -### cluster is already initialized - -- name: prepare cluster for new nodes - when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed - block: - - - name: fetch list of current nodes - command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name - changed_when: False - check_mode: no - register: kubectl_node_list - - - name: save list of current nodes - set_fact: - kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" - - - name: create bootstrap token for existing cluster - when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0" - command: kubeadm token create --ttl 42m - check_mode: no - register: kubeadm_token_create - - -## calculate certificate digest - -- name: install openssl - apt: - name: openssl - state: present - -- name: get ca certificate digest - shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'" - args: - executable: /bin/bash - check_mode: no - register: kube_ca_openssl - changed_when: False - -- name: set variables needed by kubernetes/nodes to join the cluster - set_fact: - kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}" - kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}" - delegate_to: "{{ item }}" - delegate_facts: True - loop: "{{ groups['_kubernetes_nodes_'] }}" - - -## install node-local-dns - -- name: generate node-local dns cache config - template: - src: node-local-dns.yml.j2 - dest: /etc/kubernetes/node-local-dns.yml - - ## TODO: move to server-side apply (GA since 1.22) -- name: install node-local dns cache - command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml - register: kube_node_local_dns_apply_result - changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 - - -## Network Plugin - -- name: install network plugin - include_tasks: "net_{{ kubernetes_network_plugin }}.yml" diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml deleted file mode 100644 index 4759b7fd..00000000 --- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -- name: fetch secrets needed for secondary master - run_once: true - delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}" - block: - - - name: fetch list of current nodes - command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name - changed_when: False - check_mode: no - register: kubectl_node_list - - - name: save list of current nodes - set_fact: - kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}" - - - name: upload certs - when: "groups['_kubernetes_masters_'] | difference(kubernetes_current_nodes) | length > 0" - command: kubeadm init phase upload-certs --upload-certs - check_mode: no - register: kubeadm_upload_certs - - -- name: extracting encryption key for certs - set_fact: - kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}" - -- name: join kubernetes secondary master node and store log - block: - - name: join kubernetes 
secondary master node - throttle: 1 - command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}" - args: - creates: /etc/kubernetes/kubelet.conf - register: kubeadm_join - - always: - - name: dump output of kubeadm join to log file - when: kubeadm_join is changed - # This is not a handler by design to make sure this action runs at this point of the play. - copy: # noqa 503 - content: "{{ kubeadm_join.stdout }}\n" - dest: /etc/kubernetes/kubeadm-join.log - - - name: dump error output of kubeadm join to log file - when: kubeadm_join.changed and kubeadm_join.stderr - copy: - content: "{{ kubeadm_join.stderr }}\n" - dest: /etc/kubernetes/kubeadm-join.errors - - # TODO: actually check if node has registered -- name: give the new master(s) a moment to register - when: kubeadm_join is changed - pause: # noqa 503 - seconds: 5 diff --git a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 deleted file mode 100644 index 345c9bf9..00000000 --- a/roles/kubernetes/kubeadm/master/templates/encryption-config.j2 +++ /dev/null @@ -1,13 +0,0 @@ -kind: EncryptionConfiguration -apiVersion: apiserver.config.k8s.io/v1 -resources: - - resources: - - secrets - providers: - - secretbox: - keys: -{% for key in kubernetes_secrets.encryption_config_keys %} - - name: key{{ loop.index }} - secret: {{ key }} -{% endfor %} - - identity: {} diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 deleted file mode 100644 index 2fa98ed6..00000000 --- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 +++ /dev/null @@ -1,53 +0,0 @@ -{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #} -{# #} -apiVersion: kubeadm.k8s.io/v1beta2 -kind: InitConfiguration -{# TODO: this is ugly but we want to create our own token so we can #} -{# better control its lifetime #} -bootstrapTokens: -- ttl: "1s" -localAPIEndpoint: - bindPort: 6442 -{% if kubernetes_overlay_node_ip is defined %} - advertiseAddress: {{ kubernetes_overlay_node_ip }} -{% endif %} -nodeRegistration: - criSocket: {{ kubernetes_cri_socket }} ---- -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -kubernetesVersion: {{ kubernetes_version }} -clusterName: {{ kubernetes.cluster_name }} -imageRepository: k8s.gcr.io -controlPlaneEndpoint: 127.0.0.1:6443 -networking: - dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }} - podSubnet: {{ kubernetes.pod_ip_range }} - serviceSubnet: {{ kubernetes.service_ip_range }} -apiServer: - extraArgs: - encryption-provider-config: /etc/kubernetes/encryption/config - extraVolumes: - - name: encryption-config - hostPath: /etc/kubernetes/encryption - mountPath: /etc/kubernetes/encryption - readOnly: true - pathType: Directory -{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %} - certSANs: [] -{% else %} - certSANs: - {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }} -{% endif %} -controllerManager: - extraArgs: - node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}" -scheduler: {} -dns: - type: CoreDNS ---- -apiVersion:
kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -clusterDNS: -- {{ kubernetes_nodelocal_dnscache_ip }} -cgroupDriver: systemd diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 deleted file mode 100644 index a2660db2..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 +++ /dev/null @@ -1,235 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-cfg - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "cniVersion":"0.3.0", - "name":"mynet", - "plugins":[ - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "hairpinMode": true, - "ipam":{ - "type":"host-local" - } - }, - { - "type":"portmap", - "capabilities":{ - "snat":true, - "portMappings":true - } - } - ] - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - - --routes-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KUBE_ROUTER_CNI_CONF_FILE - value: /etc/cni/net.d/10-kuberouter.conflist - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - initContainers: - - name: install-cni - image: busybox - imagePullPolicy: Always - command: - - /bin/sh - - -c - - set -e -x; - if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then - if [ -f /etc/cni/net.d/*.conf ]; then - rm -f /etc/cni/net.d/*.conf; - fi; - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; - fi - volumeMounts: - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kube-router-cfg - mountPath: /etc/kube-router - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: cni-conf-dir - hostPath: - path: /etc/cni/net.d - - name: kube-router-cfg - configMap: - name: kube-router-cfg - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 deleted file mode 100644 index 382164cb..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 +++ /dev/null @@ -1,236 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-cfg - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "cniVersion":"0.3.0", - "name":"mynet", - "plugins":[ - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "hairpinMode": true, - "ipam":{ - "type":"host-local" - } - }, - { - "type":"portmap", - "capabilities":{ - "snat":true, - "portMappings":true - } - } - ] - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - 
prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} - - --bgp-graceful-restart=true - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - - --routes-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KUBE_ROUTER_CNI_CONF_FILE - value: /etc/cni/net.d/10-kuberouter.conflist - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - initContainers: - - name: install-cni - image: busybox - imagePullPolicy: Always - command: - - /bin/sh - - -c - - set -e -x; - if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then - if [ -f /etc/cni/net.d/*.conf ]; then - rm -f /etc/cni/net.d/*.conf; - fi; - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; - fi - volumeMounts: - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kube-router-cfg - mountPath: /etc/kube-router - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: cni-conf-dir - hostPath: - path: /etc/cni/net.d - - name: kube-router-cfg - configMap: - name: kube-router-cfg - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 deleted file mode 100644 index 382164cb..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.4.0.yml.j2 +++ /dev/null @@ -1,236 +0,0 @@ 
-apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-cfg - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "cniVersion":"0.3.0", - "name":"mynet", - "plugins":[ - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "hairpinMode": true, - "ipam":{ - "type":"host-local" - } - }, - { - "type":"portmap", - "capabilities":{ - "snat":true, - "portMappings":true - } - } - ] - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} - - --bgp-graceful-restart=true - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - - --routes-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KUBE_ROUTER_CNI_CONF_FILE - value: /etc/cni/net.d/10-kuberouter.conflist - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - initContainers: - - name: install-cni - image: busybox - imagePullPolicy: Always - command: - - /bin/sh - - -c - - set -e -x; - if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then - if [ -f /etc/cni/net.d/*.conf ]; then - rm -f /etc/cni/net.d/*.conf; - fi; - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; - fi - volumeMounts: - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kube-router-cfg - mountPath: /etc/kube-router - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: cni-conf-dir - hostPath: - path: /etc/cni/net.d - - name: kube-router-cfg - configMap: - name: kube-router-cfg - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 deleted file mode 100644 index e343f4a7..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 +++ /dev/null @@ -1,170 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --cluster-cidr={{ kubernetes.pod_ip_range }} - - --run-router=false - - --run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy 
| string | lower }} - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 deleted file mode 100644 index ec30d670..00000000 --- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 +++ /dev/null @@ -1,170 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-kubeconfig - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - kubeconfig.conf: | - apiVersion: v1 - kind: Config - clusters: - - cluster: - certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }} - name: default - contexts: - - context: - cluster: default - namespace: default - user: default - name: default - current-context: default - users: - - name: default - user: - tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "8080" - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - serviceAccount: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }} - imagePullPolicy: Always - args: - - --run-router=false - - 
--run-firewall=true - - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }} - - --bgp-graceful-restart=true - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --hairpin-mode - - --iptables-sync-period=10s - - --ipvs-sync-period=10s - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 250m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: kubeconfig - mountPath: /var/lib/kube-router - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - hostNetwork: true - tolerations: - - effect: NoSchedule - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: kubeconfig - configMap: - name: kube-router-kubeconfig - items: - - key: kubeconfig.conf - path: kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - list - - get - - watch - - apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - list - - get - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system diff --git a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 deleted file mode 100644 index d536d5a7..00000000 --- a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2018 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: node-local-dns - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-dns-upstream - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/name: "KubeDNSUpstream" -spec: - ports: - - name: dns - port: 53 - protocol: UDP - targetPort: 53 - - name: dns-tcp - port: 53 - protocol: TCP - targetPort: 53 - selector: - k8s-app: kube-dns ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: node-local-dns - namespace: kube-system - labels: - addonmanager.kubernetes.io/mode: Reconcile -data: - Corefile: | - {{ kubernetes.dns_domain | default('cluster.local') }}:53 { - errors - cache { - success 9984 30 - denial 9984 5 - } - reload - loop - bind {{ kubernetes_nodelocal_dnscache_ip }} - forward . __PILLAR__CLUSTER__DNS__ { - force_tcp - } - prometheus :9253 - health {{ kubernetes_nodelocal_dnscache_ip }}:8080 - } - in-addr.arpa:53 { - errors - cache 30 - reload - loop - bind {{ kubernetes_nodelocal_dnscache_ip }} - forward . __PILLAR__CLUSTER__DNS__ { - force_tcp - } - prometheus :9253 - } - ip6.arpa:53 { - errors - cache 30 - reload - loop - bind {{ kubernetes_nodelocal_dnscache_ip }} - forward . __PILLAR__CLUSTER__DNS__ { - force_tcp - } - prometheus :9253 - } - .:53 { - errors - cache 30 - reload - loop - bind {{ kubernetes_nodelocal_dnscache_ip }} - forward . __PILLAR__UPSTREAM__SERVERS__ { - force_tcp - } - prometheus :9253 - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: node-local-dns - namespace: kube-system - labels: - k8s-app: node-local-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -spec: - updateStrategy: - rollingUpdate: - maxUnavailable: 10% - selector: - matchLabels: - k8s-app: node-local-dns - template: - metadata: - labels: - k8s-app: node-local-dns - annotations: - prometheus.io/port: "9253" - prometheus.io/scrape: "true" - spec: - priorityClassName: system-node-critical - serviceAccountName: node-local-dns - hostNetwork: true - dnsPolicy: Default # Don't use cluster DNS. 
- tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - - effect: "NoExecute" - operator: "Exists" - - effect: "NoSchedule" - operator: "Exists" - containers: - - name: node-cache - image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0 - resources: - requests: - cpu: 25m - memory: 5Mi - args: [ "-localip", "{{ kubernetes_nodelocal_dnscache_ip }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ] - securityContext: - privileged: true - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - - containerPort: 9253 - name: metrics - protocol: TCP - livenessProbe: - httpGet: - host: {{ kubernetes_nodelocal_dnscache_ip }} - path: /health - port: 8080 - initialDelaySeconds: 60 - timeoutSeconds: 5 - volumeMounts: - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - name: config-volume - mountPath: /etc/coredns - - name: kube-dns-config - mountPath: /etc/kube-dns - volumes: - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - - name: kube-dns-config - configMap: - name: kube-dns - optional: true - - name: config-volume - configMap: - name: node-local-dns - items: - - key: Corefile - path: Corefile.base ---- -# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods. -# We use this to expose metrics to Prometheus. -apiVersion: v1 -kind: Service -metadata: - annotations: - prometheus.io/port: "9253" - prometheus.io/scrape: "true" - labels: - k8s-app: node-local-dns - name: node-local-dns - namespace: kube-system -spec: - clusterIP: None - ports: - - name: metrics - port: 9253 - targetPort: 9253 - selector: - k8s-app: node-local-dns diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml deleted file mode 100644 index 13937bcf..00000000 --- a/roles/kubernetes/kubeadm/node/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: join kubernetes node and store log - block: - - name: join kubernetes node - command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" - args: - creates: /etc/kubernetes/kubelet.conf - register: kubeadm_join - - always: - - name: dump output of kubeadm join to log file - when: kubeadm_join is changed - # This is not a handler by design to make sure this action runs at this point of the play. 
- copy: # noqa 503 - content: "{{ kubeadm_join.stdout }}\n" - dest: /etc/kubernetes/kubeadm-join.log - - - name: dump error output of kubeadm join to log file - when: kubeadm_join.changed and kubeadm_join.stderr - copy: - content: "{{ kubeadm_join.stderr }}\n" - dest: /etc/kubernetes/kubeadm-join.errors diff --git a/roles/kubernetes/kubeadm/prune/tasks/main.yml b/roles/kubernetes/kubeadm/prune/tasks/main.yml index 71ed0d04..45020963 100644 --- a/roles/kubernetes/kubeadm/prune/tasks/main.yml +++ b/roles/kubernetes/kubeadm/prune/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: remove nodes from api server run_once: true - delegate_to: "{{ groups['_kubernetes_primary_master_'] | first }}" + delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}" loop: "{{ groups['_kubernetes_nodes_prune_'] | default([]) }}" command: "kubectl delete node {{ item }}" diff --git a/roles/kubernetes/kubeadm/upgrade b/roles/kubernetes/kubeadm/upgrade index c2f97d40..2cfa18cd 100644 --- a/roles/kubernetes/kubeadm/upgrade +++ b/roles/kubernetes/kubeadm/upgrade @@ -1,8 +1,8 @@ Cluster Upgrades: ================= -primary master: ---------------- +primary control-plane node: +--------------------------- VERSION=1.23.1 @@ -26,8 +26,8 @@ apt-get update && apt-get install -y "kubelet=$VERSION-00" "kubectl=$VERSION-00" kubectl uncordon $(hostname) -secondary master: ------------------ +secondary control-plane node: +----------------------------- VERSION=1.23.1 @@ -55,7 +55,7 @@ apt-get update sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubeadm.pref apt-get install -y "kubeadm=$VERSION-00" -@primary master: kubectl drain --ignore-daemonsets --delete-emptydir-data +@primary control-plane node: kubectl drain --ignore-daemonsets --delete-emptydir-data kubeadm upgrade node sed "s/^Pin: version .*$/Pin: version $VERSION-00/" -i /etc/apt/preferences.d/kubelet.pref @@ -64,4 +64,4 @@ apt-get update && apt-get install -y kubelet="$VERSION-00" "kubectl=$VERSION-00" // security updates + reboot ? -@primary master: kubectl uncordon +@primary control-plane node: kubectl uncordon diff --git a/roles/kubernetes/kubeadm/worker/tasks/main.yml b/roles/kubernetes/kubeadm/worker/tasks/main.yml new file mode 100644 index 00000000..eabb7a1f --- /dev/null +++ b/roles/kubernetes/kubeadm/worker/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: join kubernetes worker node and store log + block: + - name: join kubernetes worker node + command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'" + args: + creates: /etc/kubernetes/kubelet.conf + register: kubeadm_join + + always: + - name: dump output of kubeadm join to log file + when: kubeadm_join is changed + # This is not a handler by design to make sure this action runs at this point of the play. 
+ copy: # noqa 503 + content: "{{ kubeadm_join.stdout }}\n" + dest: /etc/kubernetes/kubeadm-join.log + + - name: dump error output of kubeadm join to log file + when: kubeadm_join.changed and kubeadm_join.stderr + copy: + content: "{{ kubeadm_join.stderr }}\n" + dest: /etc/kubernetes/kubeadm-join.errors -- cgit v1.2.3 From 09c8120540735c22316a55593f4c56bcd6ae7e88 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 8 May 2022 01:08:36 +0200 Subject: add support for cluster with kubernetes 1.24 --- inventory/group_vars/k8s-chtest/vars.yml | 2 +- roles/kubernetes/kubeadm/control-plane/tasks/primary.yml | 6 +++--- .../kubeadm/control-plane/templates/kubeadm.config.j2 | 12 +++++++----- 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'roles/kubernetes/kubeadm') diff --git a/inventory/group_vars/k8s-chtest/vars.yml b/inventory/group_vars/k8s-chtest/vars.yml index 66824314..939d93da 100644 --- a/inventory/group_vars/k8s-chtest/vars.yml +++ b/inventory/group_vars/k8s-chtest/vars.yml @@ -1,5 +1,5 @@ --- -kubernetes_version: 1.23.6 +kubernetes_version: 1.24.0 kubernetes_container_runtime: containerd kubernetes_network_plugin: kube-router kubernetes_network_plugin_version: 1.4.0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml index 22a5af42..450c3a1a 100644 --- a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml +++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml @@ -28,8 +28,8 @@ - name: initialize kubernetes primary control-plane node and store log block: - name: initialize kubernetes primary control-plane node - command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print" - # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" + command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }} --skip-token-print" + # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print" args: creates: /etc/kubernetes/pki/ca.crt register: kubeadm_init @@ -47,7 +47,7 @@ content: "{{ kubeadm_init.stderr }}\n" dest: /etc/kubernetes/kubeadm-init.errors - - name: create bootstrap token for existing cluster + - name: create bootstrap token for new cluster command: kubeadm token create --ttl 42m check_mode: no register: kubeadm_token_generate diff --git a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 index 2fa98ed6..a0f3efe7 100644 --- a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 +++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 @@ -1,6 +1,6 @@ -{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #} +{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3 #} {# #} -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: InitConfiguration {# TODO: this is ugly but we want to create our own token so we can #} {# better control its lifetime #} bootstrapTokens: -- ttl: "1s" @@ -11,10 +11,14 @@
localAPIEndpoint: {% if kubernetes_overlay_node_ip is defined %} advertiseAddress: {{ kubernetes_overlay_node_ip }} {% endif %} +{% if kubernetes_network_plugin_replaces_kube_proxy %} +skipPhases: +- addon/kube-proxy +{% endif %} nodeRegistration: criSocket: {{ kubernetes_cri_socket }} --- -apiVersion: kubeadm.k8s.io/v1beta2 +apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration kubernetesVersion: {{ kubernetes_version }} clusterName: {{ kubernetes.cluster_name }} @@ -43,8 +47,6 @@ controllerManager: extraArgs: node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}" scheduler: {} -dns: - type: CoreDNS --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration -- cgit v1.2.3 From 05e65f43df9c502eb764b184a66dd1ef5a76685c Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 8 May 2022 01:55:09 +0200 Subject: k8s/kubeadm: fix some minor TODOs --- roles/kubernetes/addons/metrics-server/tasks/main.yml | 11 ++++++++--- .../kubeadm/control-plane/tasks/net_kube-router.yml | 11 ++++++++--- .../kubeadm/control-plane/tasks/net_kubeguard.yml | 11 ++++++++--- roles/kubernetes/kubeadm/control-plane/tasks/primary.yml | 14 +++++++++----- 4 files changed, 33 insertions(+), 14 deletions(-) (limited to 'roles/kubernetes/kubeadm') diff --git a/roles/kubernetes/addons/metrics-server/tasks/main.yml b/roles/kubernetes/addons/metrics-server/tasks/main.yml index 5236e4e3..87c57346 100644 --- a/roles/kubernetes/addons/metrics-server/tasks/main.yml +++ b/roles/kubernetes/addons/metrics-server/tasks/main.yml @@ -9,8 +9,13 @@ src: "components.{{ kubernetes_metrics_server_version }}.yml.j2" dest: /etc/kubernetes/addons/metrics-server/config.yml - ## TODO: move to server-side apply (GA since 1.22) +- name: check if metrics-server is already installed + check_mode: no + command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/addons/metrics-server/config.yml + failed_when: false + changed_when: false + register: kube_metrics_server_diff_result + - name: install metrics-server onto the cluster + when: kube_metrics_server_diff_result.rc != 0 command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/addons/metrics-server/config.yml - register: kube_metrics_server_apply_result - changed_when: (kube_metrics_server_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml index 0a216414..4584e583 100644 --- a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml @@ -4,8 +4,13 @@ src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2" dest: /etc/kubernetes/network-plugin.yml - ## TODO: move to server-side apply (GA since 1.22) +- name: check if kube-router is already installed + check_mode: no + command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml + failed_when: false + changed_when: false + register: kube_router_diff_result + - name: install kube-router on to the cluster + when: kube_router_diff_result.rc != 0 command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml - register: kube_router_apply_result - changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml 
b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml index a572ca89..66dac49b 100644 --- a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml @@ -7,8 +7,13 @@ src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2" dest: /etc/kubernetes/network-plugin.yml - ## TODO: move to server-side apply (GA since 1.22) + - name: check if kubeguard (kube-router) is already installed + check_mode: no + command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml + failed_when: false + changed_when: false + register: kubeguard_diff_result + - name: install kubeguard (kube-router) on to the cluster + when: kubeguard_diff_result.rc != 0 command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml - register: kubeguard_apply_result - changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml index 450c3a1a..65a6f7c8 100644 --- a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml +++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml @@ -4,7 +4,6 @@ path: /etc/kubernetes/kubelet.conf register: kubeconfig_kubelet_stats - ## TODO: switch to kubeadm config version v1beta3 (available since 1.22) - name: generate kubeadm.config template: src: kubeadm.config.j2 @@ -118,11 +117,16 @@ src: node-local-dns.yml.j2 dest: /etc/kubernetes/node-local-dns.yml - ## TODO: move to server-side apply (GA since 1.22) -- name: install node-local dns cache +- name: check if node-local dns cache is already installed + check_mode: no + command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/node-local-dns.yml + failed_when: false + changed_when: false + register: kube_node_local_dns_diff_result + +- name: install node-local dns cache + when: kube_node_local_dns_diff_result.rc != 0 command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml - register: kube_node_local_dns_apply_result - changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0 ## Network Plugin -- cgit v1.2.3 From 40f958ce64fc08b5fb35aac3f05941fe4b514ec5 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 8 May 2022 02:17:33 +0200 Subject: kubernetes/kubeadm: fix kubeguard network plugin --- inventory/group_vars/k8s-emc/vars.yml | 4 ++-- roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml | 4 ++-- .../kubeadm/base/templates/net_kubeguard/cni.conflist.j2 | 16 ++++++++++++++++ .../kubeadm/base/templates/net_kubeguard/cni.json.j2 | 12 ------------ 4 files changed, 20 insertions(+), 16 deletions(-) create mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 delete mode 100644 roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 (limited to 'roles/kubernetes/kubeadm') diff --git a/inventory/group_vars/k8s-emc/vars.yml b/inventory/group_vars/k8s-emc/vars.yml index b2a8fe39..be1c4818 100644 --- a/inventory/group_vars/k8s-emc/vars.yml +++ b/inventory/group_vars/k8s-emc/vars.yml @@ -1,5 +1,5 @@ --- -kubernetes_version: 1.23.1 +kubernetes_version: 1.24.0 kubernetes_container_runtime: containerd kubernetes_network_plugin: kubeguard @@ -48,4 +48,4 @@ kubeguard: kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | 
ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ipaddr(1) | ipaddr('address') }}" -kubernetes_metrics_server_version: 0.5.2 +kubernetes_metrics_server_version: 0.6.1 diff --git a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml index 40cee3b7..350ecdee 100644 --- a/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml +++ b/roles/kubernetes/kubeadm/base/tasks/net_kubeguard.yml @@ -87,8 +87,8 @@ - name: install cni config template: - src: net_kubeguard/cni.json.j2 - dest: /etc/cni/net.d/kubeguard.conf + src: net_kubeguard/cni.conflist.j2 + dest: /etc/cni/net.d/kubeguard.conflist - name: install packages needed for debugging kube-router when: kubernetes_network_plugin_variant == 'with-kube-router' diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 new file mode 100644 index 00000000..240d86ef --- /dev/null +++ b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.conflist.j2 @@ -0,0 +1,16 @@ +{ + "cniVersion": "0.3.1", + "name": "kubeguard", + "plugins": [ + { + "type": "bridge", + "bridge": "kubeguard-br0", + "isDefaultGateway": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}" + } + } + ] +} diff --git a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 b/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 deleted file mode 100644 index eb9e3d61..00000000 --- a/roles/kubernetes/kubeadm/base/templates/net_kubeguard/cni.json.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cniVersion": "0.3.1", - "name": "kubeguard", - "type": "bridge", - "bridge": "kubeguard-br0", - "isDefaultGateway": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "subnet": "{{ kubernetes.pod_ip_range | ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) }}" - } -} -- cgit v1.2.3
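
The kubectl-diff-before-kubectl-apply pattern that 05e65f43 introduces for the metrics-server, kube-router, kubeguard and node-local-dns manifests generalizes to any manifest managed this way. A minimal sketch of the pattern as a standalone pair of Ansible tasks (the task names and the manifest_path variable are illustrative, not taken from the roles above):

    # kubectl diff exits 0 when the manifest already matches the live cluster
    # state and non-zero when applying it would change something, so a pending
    # change must not be treated as a task failure here.
    - name: check if manifest is already up to date
      check_mode: no
      command: "kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f {{ manifest_path }}"
      failed_when: false
      changed_when: false
      register: manifest_diff_result

    # Only run kubectl apply (and thus report "changed") when the diff found
    # something to do; this replaces the old heuristic of parsing apply output
    # for lines not ending in " unchanged".
    - name: apply manifest
      when: manifest_diff_result.rc != 0
      command: "kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f {{ manifest_path }}"

One caveat: kubectl diff uses exit code 1 for "differences found" and values greater than 1 for real errors, so this sketch, like the tasks added in the commit, treats a transient API error as "needs apply" rather than failing the check task.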