summaryrefslogtreecommitdiff
path: root/roles/kubernetes/kubeadm/control-plane
diff options
context:
space:
mode:
Diffstat (limited to 'roles/kubernetes/kubeadm/control-plane')
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/main.yml76
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml11
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml14
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml2
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/primary.yml131
-rw-r--r--roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml55
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j213
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j253
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2235
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2236
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2236
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2170
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2170
-rw-r--r--roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2211
14 files changed, 1613 insertions, 0 deletions
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/main.yml b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml
new file mode 100644
index 00000000..d5bd378e
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/main.yml
@@ -0,0 +1,76 @@
+---
+- name: create directory for encryption config
+ file:
+ name: /etc/kubernetes/encryption
+ state: directory
+ mode: 0700
+
+- name: install encryption config
+ template:
+ src: encryption-config.j2
+ dest: /etc/kubernetes/encryption/config
+ mode: 0600
+
+
+- name: install primary control-plane node
+ include_tasks: primary.yml
+ when: "'_kubernetes_primary_controlplane_node_' in group_names"
+
+- name: install secondary control-plane nodes
+ include_tasks: secondary.yml
+ when: "'_kubernetes_primary_controlplane_node_' not in group_names"
+
+
+- name: check if control-plane node is tainted (1/2)
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf get node {{ inventory_hostname }} -o json"
+ check_mode: no
+ register: kubectl_get_node
+ changed_when: False
+
+- name: check if control-plane node is tainted (2/2)
+ set_fact:
+ kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
+
+- name: remove taint from control-plane node
+ when: not kubernetes.dedicated_controlplane_nodes
+ block:
+ - name: remove control-plane taint from node
+ when: "'node-role.kubernetes.io/control-plane' in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane-"
+
+ - name: remove deprecated master taint from node
+ when: "'node-role.kubernetes.io/master' in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
+
+- name: add taint to control-plane node
+ when: kubernetes.dedicated_controlplane_nodes
+ block:
+ - name: add control-plane taint to node
+ when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule"
+
+ - name: add deprecated master taint to node
+ when: "'node-role.kubernetes.io/master' not in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
+
+- name: prepare kubectl (1/2)
+ file:
+ name: /root/.kube
+ state: directory
+
+- name: prepare kubectl (2/2)
+ file:
+ dest: /root/.kube/config
+ src: /etc/kubernetes/admin.conf
+ state: link
+
+- name: add kubectl completion config for shells
+ loop:
+ - zsh
+ - bash
+ blockinfile:
+ path: "/root/.{{ item }}rc"
+ create: yes
+ marker: "### {mark} ANSIBLE MANAGED BLOCK for kubectl ###"
+ content: |
+ source <(kubectl completion {{ item }})
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
new file mode 100644
index 00000000..0a216414
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
@@ -0,0 +1,11 @@
+---
+- name: generate kube-router configuration
+ template:
+ src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2"
+ dest: /etc/kubernetes/network-plugin.yml
+
+ ## TODO: move to server-side apply (GA since 1.22)
+- name: install kube-router onto the cluster
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
+ register: kube_router_apply_result
+ changed_when: (kube_router_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
new file mode 100644
index 00000000..a572ca89
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
@@ -0,0 +1,14 @@
+---
+- name: install kube-router variant
+ when: "kubernetes_network_plugin_variant == 'with-kube-router'"
+ block:
+ - name: generate kubeguard (kube-router) configuration
+ template:
+ src: "net_kubeguard/kube-router.{{ kubernetes_network_plugin_version }}.yml.j2"
+ dest: /etc/kubernetes/network-plugin.yml
+
+ ## TODO: move to server-side apply (GA since 1.22)
+  - name: install kubeguard (kube-router) onto the cluster
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
+ register: kubeguard_apply_result
+ changed_when: (kubeguard_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
new file mode 100644
index 00000000..bf1a16d5
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
@@ -0,0 +1,2 @@
+---
+## this "plugin" is for testing purposes only
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
new file mode 100644
index 00000000..22a5af42
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
@@ -0,0 +1,131 @@
+---
+- name: check if kubeconfig kubelet.conf already exists
+ stat:
+ path: /etc/kubernetes/kubelet.conf
+ register: kubeconfig_kubelet_stats
+
+ ## TODO: switch to kubeadm config version v1beta3 (available since 1.22)
+- name: generate kubeadm.config
+ template:
+ src: kubeadm.config.j2
+ dest: /etc/kubernetes/kubeadm.config
+ register: kubeadm_config
+
+### cluster not yet initialized
+
+- name: create new cluster
+ when: not kubeconfig_kubelet_stats.stat.exists
+ block:
+
+ #### kubeadm wants token to come from --config if --config is used
+ #### i think this is stupid -> TODO: send bug report
+ # - name: generate bootstrap token for new cluster
+ # command: kubeadm token generate
+ # changed_when: False
+ # check_mode: no
+ # register: kubeadm_token_generate
+
+ - name: initialize kubernetes primary control-plane node and store log
+ block:
+ - name: initialize kubernetes primary control-plane node
+ command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+ # command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_network_plugin_replaces_kube_proxy %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+ args:
+ creates: /etc/kubernetes/pki/ca.crt
+ register: kubeadm_init
+
+ always:
+ - name: dump output of kubeadm init to log file
+ when: kubeadm_init.changed
+ copy:
+ content: "{{ kubeadm_init.stdout }}\n"
+ dest: /etc/kubernetes/kubeadm-init.log
+
+ - name: dump error output of kubeadm init to log file
+ when: kubeadm_init.changed and kubeadm_init.stderr
+ copy:
+ content: "{{ kubeadm_init.stderr }}\n"
+ dest: /etc/kubernetes/kubeadm-init.errors
+
+ - name: create bootstrap token for existing cluster
+ command: kubeadm token create --ttl 42m
+ check_mode: no
+ register: kubeadm_token_generate
+
+
+### cluster is already initialized but config has changed
+
+- name: upgrade cluster config
+ when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is changed
+ block:
+
+ - name: fail for cluster upgrades
+ fail:
+ msg: "upgrading cluster config is currently not supported!"
+
+
+### cluster is already initialized
+
+- name: prepare cluster for new nodes
+ when: kubeconfig_kubelet_stats.stat.exists and kubeadm_config is not changed
+ block:
+
+ - name: fetch list of current nodes
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+ changed_when: False
+ check_mode: no
+ register: kubectl_node_list
+
+ - name: save list of current nodes
+ set_fact:
+ kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+ - name: create bootstrap token for existing cluster
+ when: "groups['_kubernetes_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
+ command: kubeadm token create --ttl 42m
+ check_mode: no
+ register: kubeadm_token_create
+
+
+## calculate certificate digest
+
+- name: install openssl
+ apt:
+ name: openssl
+ state: present
+
+- name: get ca certificate digest
+ shell: "set -o pipefail && openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'"
+ args:
+ executable: /bin/bash
+ check_mode: no
+ register: kube_ca_openssl
+ changed_when: False
+
+- name: set variables needed by kubernetes/nodes to join the cluster
+ set_fact:
+ kube_bootstrap_token: "{% if kubeadm_token_generate.stdout is defined %}{{ kubeadm_token_generate.stdout }}{% elif kubeadm_token_create.stdout is defined %}{{ kubeadm_token_create.stdout }}{% endif %}"
+ kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ loop: "{{ groups['_kubernetes_nodes_'] }}"
+
+
+## install node-local-dns
+
+- name: generate node-local dns cache config
+ template:
+ src: node-local-dns.yml.j2
+ dest: /etc/kubernetes/node-local-dns.yml
+
+ ## TODO: move to server-side apply (GA since 1.22)
+- name: install node-local dns cache
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml
+ register: kube_node_local_dns_apply_result
+ changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
+
+
+## Network Plugin
+
+- name: install network plugin
+ include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml
new file mode 100644
index 00000000..a2dbe081
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/secondary.yml
@@ -0,0 +1,55 @@
+---
+- name: fetch secrets needed for secondary control-plane node
+ run_once: true
+ delegate_to: "{{ groups['_kubernetes_primary_controlplane_node_'] | first }}"
+ block:
+
+ - name: fetch list of current nodes
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes -o name
+ changed_when: False
+ check_mode: no
+ register: kubectl_node_list
+
+ - name: save list of current nodes
+ set_fact:
+ kubernetes_current_nodes: "{{ kubectl_node_list.stdout_lines | map('replace', 'node/', '') | list }}"
+
+ - name: upload certs
+ when: "groups['_kubernetes_controlplane_nodes_'] | difference(kubernetes_current_nodes) | length > 0"
+ command: kubeadm init phase upload-certs --upload-certs
+ check_mode: no
+ register: kubeadm_upload_certs
+
+
+- name: extracting encryption key for certs
+ set_fact:
+ kubeadm_upload_certs_key: "{% if kubeadm_upload_certs.stdout is defined %}{{ kubeadm_upload_certs.stdout_lines | last }}{% endif %}"
+
+- name: join kubernetes secondary control-plane node and store log
+ block:
+ - name: join kubernetes secondary control-plane node
+ throttle: 1
+ command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %} --cri-socket {{ kubernetes_cri_socket }} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+ args:
+ creates: /etc/kubernetes/kubelet.conf
+ register: kubeadm_join
+
+ always:
+ - name: dump output of kubeadm join to log file
+ when: kubeadm_join is changed
+ # This is not a handler by design to make sure this action runs at this point of the play.
+ copy: # noqa 503
+ content: "{{ kubeadm_join.stdout }}\n"
+ dest: /etc/kubernetes/kubeadm-join.log
+
+ - name: dump error output of kubeadm join to log file
+ when: kubeadm_join.changed and kubeadm_join.stderr
+ copy:
+ content: "{{ kubeadm_join.stderr }}\n"
+ dest: /etc/kubernetes/kubeadm-join.errors
+
+  # TODO: actually check if node has registered
+- name: give the new control-plane node(s) a moment to register
+ when: kubeadm_join is changed
+ pause: # noqa 503
+ seconds: 5
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2
new file mode 100644
index 00000000..345c9bf9
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/encryption-config.j2
@@ -0,0 +1,13 @@
+kind: EncryptionConfiguration
+apiVersion: apiserver.config.k8s.io/v1
+resources:
+ - resources:
+ - secrets
+ providers:
+ - secretbox:
+ keys:
+{% for key in kubernetes_secrets.encryption_config_keys %}
+ - name: key{{ loop.index }}
+ secret: {{ key }}
+{% endfor %}
+ - identity: {}
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2
new file mode 100644
index 00000000..2fa98ed6
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm.config.j2
@@ -0,0 +1,53 @@
+{# https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 #}
+{# #}
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+{# TODO: this is ugly but we want to create our own token so we can #}
+{# better control its lifetime #}
+bootstrapTokens:
+- ttl: "1s"
+localAPIEndpoint:
+ bindPort: 6442
+{% if kubernetes_overlay_node_ip is defined %}
+ advertiseAddress: {{ kubernetes_overlay_node_ip }}
+{% endif %}
+nodeRegistration:
+ criSocket: {{ kubernetes_cri_socket }}
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+kubernetesVersion: {{ kubernetes_version }}
+clusterName: {{ kubernetes.cluster_name }}
+imageRepository: k8s.gcr.io
+controlPlaneEndpoint: 127.0.0.1:6443
+networking:
+ dnsDomain: {{ kubernetes.dns_domain | default('cluster.local') }}
+ podSubnet: {{ kubernetes.pod_ip_range }}
+ serviceSubnet: {{ kubernetes.service_ip_range }}
+apiServer:
+ extraArgs:
+ encryption-provider-config: /etc/kubernetes/encryption/config
+ extraVolumes:
+ - name: encryption-config
+ hostPath: /etc/kubernetes/encryption
+ mountPath: /etc/kubernetes/encryption
+ readOnly: true
+ pathType: Directory
+{% if (kubernetes.api_extra_sans | default([]) | length) == 0 %}
+ certSANs: []
+{% else %}
+ certSANs:
+ {{ kubernetes.api_extra_sans | to_nice_yaml | indent(width=2) }}
+{% endif %}
+controllerManager:
+ extraArgs:
+ node-cidr-mask-size: "{{ kubernetes.pod_ip_range_size }}"
+scheduler: {}
+dns:
+ type: CoreDNS
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+clusterDNS:
+- {{ kubernetes_nodelocal_dnscache_ip }}
+cgroupDriver: systemd
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2
new file mode 100644
index 00000000..a2660db2
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.0.4.0.yml.j2
@@ -0,0 +1,235 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ cni-conf.json: |
+ {
+ "cniVersion":"0.3.0",
+ "name":"mynet",
+ "plugins":[
+ {
+ "name":"kubernetes",
+ "type":"bridge",
+ "bridge":"kube-bridge",
+ "isDefaultGateway":true,
+ "hairpinMode": true,
+ "ipam":{
+ "type":"host-local"
+ }
+ },
+ {
+ "type":"portmap",
+ "capabilities":{
+ "snat":true,
+ "portMappings":true
+ }
+ }
+ ]
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --run-router=true
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ - --routes-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: KUBE_ROUTER_CNI_CONF_FILE
+ value: /etc/cni/net.d/10-kuberouter.conflist
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ initContainers:
+ - name: install-cni
+ image: busybox
+ imagePullPolicy: Always
+ command:
+ - /bin/sh
+ - -c
+ - set -e -x;
+ if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
+ if [ -f /etc/cni/net.d/*.conf ]; then
+ rm -f /etc/cni/net.d/*.conf;
+ fi;
+ TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
+ cp /etc/kube-router/cni-conf.json ${TMP};
+ mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
+ fi
+ volumeMounts:
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kube-router-cfg
+ mountPath: /etc/kube-router
+ hostNetwork: true
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: cni-conf-dir
+ hostPath:
+ path: /etc/cni/net.d
+ - name: kube-router-cfg
+ configMap:
+ name: kube-router-cfg
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2
new file mode 100644
index 00000000..382164cb
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.1.1.yml.j2
@@ -0,0 +1,236 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ cni-conf.json: |
+ {
+ "cniVersion":"0.3.0",
+ "name":"mynet",
+ "plugins":[
+ {
+ "name":"kubernetes",
+ "type":"bridge",
+ "bridge":"kube-bridge",
+ "isDefaultGateway":true,
+ "hairpinMode": true,
+ "ipam":{
+ "type":"host-local"
+ }
+ },
+ {
+ "type":"portmap",
+ "capabilities":{
+ "snat":true,
+ "portMappings":true
+ }
+ }
+ ]
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --run-router=true
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --bgp-graceful-restart=true
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ - --routes-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: KUBE_ROUTER_CNI_CONF_FILE
+ value: /etc/cni/net.d/10-kuberouter.conflist
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ initContainers:
+ - name: install-cni
+ image: busybox
+ imagePullPolicy: Always
+ command:
+ - /bin/sh
+ - -c
+ - set -e -x;
+ if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
+ if [ -f /etc/cni/net.d/*.conf ]; then
+ rm -f /etc/cni/net.d/*.conf;
+ fi;
+ TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
+ cp /etc/kube-router/cni-conf.json ${TMP};
+ mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
+ fi
+ volumeMounts:
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kube-router-cfg
+ mountPath: /etc/kube-router
+ hostNetwork: true
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: cni-conf-dir
+ hostPath:
+ path: /etc/cni/net.d
+ - name: kube-router-cfg
+ configMap:
+ name: kube-router-cfg
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2
new file mode 100644
index 00000000..382164cb
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/config.1.4.0.yml.j2
@@ -0,0 +1,236 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ cni-conf.json: |
+ {
+ "cniVersion":"0.3.0",
+ "name":"mynet",
+ "plugins":[
+ {
+ "name":"kubernetes",
+ "type":"bridge",
+ "bridge":"kube-bridge",
+ "isDefaultGateway":true,
+ "hairpinMode": true,
+ "ipam":{
+ "type":"host-local"
+ }
+ },
+ {
+ "type":"portmap",
+ "capabilities":{
+ "snat":true,
+ "portMappings":true
+ }
+ }
+ ]
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --run-router=true
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --bgp-graceful-restart=true
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ - --routes-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: KUBE_ROUTER_CNI_CONF_FILE
+ value: /etc/cni/net.d/10-kuberouter.conflist
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ initContainers:
+ - name: install-cni
+ image: busybox
+ imagePullPolicy: Always
+ command:
+ - /bin/sh
+ - -c
+ - set -e -x;
+ if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
+ if [ -f /etc/cni/net.d/*.conf ]; then
+ rm -f /etc/cni/net.d/*.conf;
+ fi;
+ TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
+ cp /etc/kube-router/cni-conf.json ${TMP};
+ mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
+ fi
+ volumeMounts:
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kube-router-cfg
+ mountPath: /etc/kube-router
+ hostNetwork: true
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: cni-conf-dir
+ hostPath:
+ path: /etc/cni/net.d
+ - name: kube-router-cfg
+ configMap:
+ name: kube-router-cfg
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
new file mode 100644
index 00000000..e343f4a7
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
@@ -0,0 +1,170 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --cluster-cidr={{ kubernetes.pod_ip_range }}
+ - --run-router=false
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ hostNetwork: true
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+  namespace: kube-system  # NOTE: ClusterRole is cluster-scoped; this namespace field is ignored by the API server
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
new file mode 100644
index 00000000..ec30d670
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
@@ -0,0 +1,170 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --run-router=false
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --bgp-graceful-restart=true
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ hostNetwork: true
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+  namespace: kube-system  # NOTE: ClusterRole is cluster-scoped; this namespace field is ignored by the API server
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
new file mode 100644
index 00000000..d536d5a7
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
@@ -0,0 +1,211 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: kube-dns-upstream
+ namespace: kube-system
+ labels:
+ k8s-app: kube-dns
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+ kubernetes.io/name: "KubeDNSUpstream"
+spec:
+ ports:
+ - name: dns
+ port: 53
+ protocol: UDP
+ targetPort: 53
+ - name: dns-tcp
+ port: 53
+ protocol: TCP
+ targetPort: 53
+ selector:
+ k8s-app: kube-dns
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+data:
+ Corefile: |
+ {{ kubernetes.dns_domain | default('cluster.local') }}:53 {
+ errors
+ cache {
+ success 9984 30
+ denial 9984 5
+ }
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ health {{ kubernetes_nodelocal_dnscache_ip }}:8080
+ }
+ in-addr.arpa:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+ ip6.arpa:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+ .:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__UPSTREAM__SERVERS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ k8s-app: node-local-dns
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ selector:
+ matchLabels:
+ k8s-app: node-local-dns
+ template:
+ metadata:
+ labels:
+ k8s-app: node-local-dns
+ annotations:
+ prometheus.io/port: "9253"
+ prometheus.io/scrape: "true"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: node-local-dns
+ hostNetwork: true
+ dnsPolicy: Default # Don't use cluster DNS.
+ tolerations:
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ containers:
+ - name: node-cache
+ image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0
+ resources:
+ requests:
+ cpu: 25m
+ memory: 5Mi
+ args: [ "-localip", "{{ kubernetes_nodelocal_dnscache_ip }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ]
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 53
+ name: dns
+ protocol: UDP
+ - containerPort: 53
+ name: dns-tcp
+ protocol: TCP
+ - containerPort: 9253
+ name: metrics
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+          host: "{{ kubernetes_nodelocal_dnscache_ip }}"
+ path: /health
+ port: 8080
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ volumeMounts:
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - name: config-volume
+ mountPath: /etc/coredns
+ - name: kube-dns-config
+ mountPath: /etc/kube-dns
+ volumes:
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ - name: kube-dns-config
+ configMap:
+ name: kube-dns
+ optional: true
+ - name: config-volume
+ configMap:
+ name: node-local-dns
+ items:
+ - key: Corefile
+ path: Corefile.base
+---
+# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods.
+# We use this to expose metrics to Prometheus.
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/port: "9253"
+ prometheus.io/scrape: "true"
+ labels:
+ k8s-app: node-local-dns
+ name: node-local-dns
+ namespace: kube-system
+spec:
+ clusterIP: None
+ ports:
+ - name: metrics
+ port: 9253
+ targetPort: 9253
+ selector:
+ k8s-app: node-local-dns