summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  inventory/group_vars/k8s-chtest/vars.yml                                                                                       |   7
-rw-r--r--  inventory/group_vars/kubernetes-cluster/vars.yml                                                                                |   5
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/defaults/main.yml                                                                        |   2
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml                                                                |  26
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml                                                                  |  19
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml                                                                       |   5
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/tasks/primary.yml                                                                        |  24
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2                                                         |   4
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/node-local-dns.yml.j2 (renamed from roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2) |   0
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2                                         | 170
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2                                         | 170
-rw-r--r--  roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/node-local-dns.yml.j2                                            | 213
-rw-r--r--  roles/kubernetes/kubeadm/reset/tasks/main.yml                                                                                   |   3
13 files changed, 278 insertions(+), 370 deletions(-)
diff --git a/inventory/group_vars/k8s-chtest/vars.yml b/inventory/group_vars/k8s-chtest/vars.yml
index df94ef13..9552f5e5 100644
--- a/inventory/group_vars/k8s-chtest/vars.yml
+++ b/inventory/group_vars/k8s-chtest/vars.yml
@@ -5,8 +5,15 @@ kubernetes_version: 1.27.1
kubernetes_cri_tools_pkg_version: 1.26.0-00
kubernetes_container_runtime: containerd
containerd_pkg_provider: docker-com
+
+#kubernetes_network_plugin: kube-router
+#kubernetes_network_plugin_version: 1.5.1
+#kubernetes_network_plugin_replaces_kube_proxy: yes
+#kubernetes_enable_nodelocal_dnscache: yes
+
kubernetes_network_plugin: none
kubernetes_network_plugin_replaces_kube_proxy: yes
+kubernetes_enable_nodelocal_dnscache: no
kubernetes:
diff --git a/inventory/group_vars/kubernetes-cluster/vars.yml b/inventory/group_vars/kubernetes-cluster/vars.yml
index 85db9949..5cc246ec 100644
--- a/inventory/group_vars/kubernetes-cluster/vars.yml
+++ b/inventory/group_vars/kubernetes-cluster/vars.yml
@@ -1,6 +1,5 @@
---
kubernetes_node_name: "{{ inventory_hostname }}"
-kubernetes_network_plugin_replaces_kube_proxy: false
-
-kubernetes_nodelocal_dnscache_ip: 169.254.20.10
+kubernetes_network_plugin_replaces_kube_proxy: no
+kubernetes_enable_nodelocal_dnscache: yes
diff --git a/roles/kubernetes/kubeadm/control-plane/defaults/main.yml b/roles/kubernetes/kubeadm/control-plane/defaults/main.yml
new file mode 100644
index 00000000..c1149988
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+kubernetes_nodelocal_dnscache_ip: 169.254.20.10
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
index 4584e583..aad6467b 100644
--- a/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kube-router.yml
@@ -2,15 +2,35 @@
- name: generate kube-router configuration
template:
src: "net_kube-router/config.{{ kubernetes_network_plugin_version }}.yml.j2"
- dest: /etc/kubernetes/network-plugin.yml
+ dest: /etc/kubernetes/network-plugin/config.yml
- name: check if kube-router is already installed
check_mode: no
- command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin.yml
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin/config.yml
failed_when: false
changed_when: false
register: kube_router_diff_result
- name: install kube-router on to the cluster
when: kube_router_diff_result.rc != 0
- command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin.yml
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin/config.yml
+
+
+- name: install node-local dns cache
+ when: kubernetes_enable_nodelocal_dnscache
+ block:
+ - name: generate node-local dns cache config
+ template:
+ src: net_kube-router/node-local-dns.yml.j2
+ dest: /etc/kubernetes/network-plugin/node-local-dns.yml
+
+ - name: check if node-local dns cache is already installed
+ check_mode: no
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin/node-local-dns.yml
+ failed_when: false
+ changed_when: false
+ register: kube_node_local_dns_diff_result
+
+ - name: install node-local dns cache
+ when: kube_node_local_dns_diff_result.rc != 0
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin/node-local-dns.yml
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
index 94832c38..05428ea1 100644
--- a/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_kubeguard.yml
@@ -1,2 +1,19 @@
---
-## nothing to do here
+- name: install node-local dns cache
+ when: kubernetes_enable_nodelocal_dnscache
+ block:
+ - name: generate node-local dns cache config
+ template:
+ src: net_kubeguard/node-local-dns.yml.j2
+ dest: /etc/kubernetes/network-plugin/node-local-dns.yml
+
+ - name: check if node-local dns cache is already installed
+ check_mode: no
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin/node-local-dns.yml
+ failed_when: false
+ changed_when: false
+ register: kube_node_local_dns_diff_result
+
+ - name: install node-local dns cache
+ when: kube_node_local_dns_diff_result.rc != 0
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin/node-local-dns.yml
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
index bf1a16d5..4b5824d4 100644
--- a/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_none.yml
@@ -1,2 +1,5 @@
---
-## this "plugin" is for testing purposes only
+- name: install node-local dns cache
+ when: kubernetes_enable_nodelocal_dnscache
+ debug:
+ msg: "the 'none' network-plugin is intended to manually install/test network-plugins and since the exact deployment variant for the node-local dns cache is highly dependent on what the network plugin supports we do not install anything here. If the network plugin does not support nodelocal dns caches at all you may want to set kubernetes_enable_nodelocal_dnscache to false."
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
index 36195235..c0591032 100644
--- a/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/primary.yml
@@ -101,26 +101,12 @@
kube_bootstrap_ca_cert_hash: "sha256:{{ kube_ca_openssl.stdout }}"
-## install node-local-dns
-
-- name: generate node-local dns cache config
- template:
- src: node-local-dns.yml.j2
- dest: /etc/kubernetes/node-local-dns.yml
-
-- name: check if node-local dns cache is already installed
- check_mode: no
- command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/node-local-dns.yml
- failed_when: false
- changed_when: false
- register: kube_node_local_dns_diff_result
-
-- name: install node-local dns cache
- when: kube_node_local_dns_diff_result.rc != 0
- command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml
-
-
## Network Plugin
+- name: create network plugin config directory
+ file:
+ path: /etc/kubernetes/network-plugin
+ state: directory
+
- name: install network plugin
include_tasks: "net_{{ kubernetes_network_plugin }}.yml"
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2 b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2
index 9dfd6825..3a82a8e1 100644
--- a/roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/kubeadm-init.config.j2
@@ -56,6 +56,8 @@ scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
+cgroupDriver: systemd
+{% if kubernetes_enable_nodelocal_dnscache %}
clusterDNS:
- "{{ kubernetes_nodelocal_dnscache_ip }}"
-cgroupDriver: systemd
+{% endif %}
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/node-local-dns.yml.j2
index 22ae6c22..22ae6c22 100644
--- a/roles/kubernetes/kubeadm/control-plane/templates/node-local-dns.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/node-local-dns.yml.j2
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
deleted file mode 100644
index e343f4a7..00000000
--- a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.0.4.0.yml.j2
+++ /dev/null
@@ -1,170 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: kube-router-kubeconfig
- namespace: kube-system
- labels:
- tier: node
- k8s-app: kube-router
-data:
- kubeconfig.conf: |
- apiVersion: v1
- kind: Config
- clusters:
- - cluster:
- certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
- name: default
- contexts:
- - context:
- cluster: default
- namespace: default
- user: default
- name: default
- current-context: default
- users:
- - name: default
- user:
- tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- labels:
- k8s-app: kube-router
- tier: node
- name: kube-router
- namespace: kube-system
-spec:
- selector:
- matchLabels:
- k8s-app: kube-router
- tier: node
- template:
- metadata:
- labels:
- k8s-app: kube-router
- tier: node
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/port: "8080"
- spec:
- priorityClassName: system-node-critical
- serviceAccountName: kube-router
- serviceAccount: kube-router
- containers:
- - name: kube-router
- image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
- imagePullPolicy: Always
- args:
- - --cluster-cidr={{ kubernetes.pod_ip_range }}
- - --run-router=false
- - --run-firewall=true
- - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
- - --kubeconfig=/var/lib/kube-router/kubeconfig
- - --hairpin-mode
- - --iptables-sync-period=10s
- - --ipvs-sync-period=10s
- env:
- - name: NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- livenessProbe:
- httpGet:
- path: /healthz
- port: 20244
- initialDelaySeconds: 10
- periodSeconds: 3
- resources:
- requests:
- cpu: 250m
- memory: 250Mi
- securityContext:
- privileged: true
- volumeMounts:
- - name: lib-modules
- mountPath: /lib/modules
- readOnly: true
- - name: kubeconfig
- mountPath: /var/lib/kube-router
- readOnly: true
- - name: xtables-lock
- mountPath: /run/xtables.lock
- readOnly: false
- hostNetwork: true
- tolerations:
- - effect: NoSchedule
- operator: Exists
- - key: CriticalAddonsOnly
- operator: Exists
- - effect: NoExecute
- operator: Exists
- volumes:
- - name: lib-modules
- hostPath:
- path: /lib/modules
- - name: kubeconfig
- configMap:
- name: kube-router-kubeconfig
- items:
- - key: kubeconfig.conf
- path: kubeconfig
- - name: xtables-lock
- hostPath:
- path: /run/xtables.lock
- type: FileOrCreate
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: kube-router
- namespace: kube-system
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: kube-router
- namespace: kube-system
-rules:
- - apiGroups:
- - ""
- resources:
- - namespaces
- - pods
- - services
- - nodes
- - endpoints
- verbs:
- - list
- - get
- - watch
- - apiGroups:
- - "networking.k8s.io"
- resources:
- - networkpolicies
- verbs:
- - list
- - get
- - watch
- - apiGroups:
- - extensions
- resources:
- - networkpolicies
- verbs:
- - get
- - list
- - watch
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: kube-router
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: kube-router
-subjects:
-- kind: ServiceAccount
- name: kube-router
- namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
deleted file mode 100644
index ec30d670..00000000
--- a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/kube-router.1.1.1.yml.j2
+++ /dev/null
@@ -1,170 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: kube-router-kubeconfig
- namespace: kube-system
- labels:
- tier: node
- k8s-app: kube-router
-data:
- kubeconfig.conf: |
- apiVersion: v1
- kind: Config
- clusters:
- - cluster:
- certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
- name: default
- contexts:
- - context:
- cluster: default
- namespace: default
- user: default
- name: default
- current-context: default
- users:
- - name: default
- user:
- tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- labels:
- k8s-app: kube-router
- tier: node
- name: kube-router
- namespace: kube-system
-spec:
- selector:
- matchLabels:
- k8s-app: kube-router
- tier: node
- template:
- metadata:
- labels:
- k8s-app: kube-router
- tier: node
- annotations:
- prometheus.io/scrape: "true"
- prometheus.io/port: "8080"
- spec:
- priorityClassName: system-node-critical
- serviceAccountName: kube-router
- serviceAccount: kube-router
- containers:
- - name: kube-router
- image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
- imagePullPolicy: Always
- args:
- - --run-router=false
- - --run-firewall=true
- - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
- - --bgp-graceful-restart=true
- - --kubeconfig=/var/lib/kube-router/kubeconfig
- - --hairpin-mode
- - --iptables-sync-period=10s
- - --ipvs-sync-period=10s
- env:
- - name: NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- livenessProbe:
- httpGet:
- path: /healthz
- port: 20244
- initialDelaySeconds: 10
- periodSeconds: 3
- resources:
- requests:
- cpu: 250m
- memory: 250Mi
- securityContext:
- privileged: true
- volumeMounts:
- - name: lib-modules
- mountPath: /lib/modules
- readOnly: true
- - name: kubeconfig
- mountPath: /var/lib/kube-router
- readOnly: true
- - name: xtables-lock
- mountPath: /run/xtables.lock
- readOnly: false
- hostNetwork: true
- tolerations:
- - effect: NoSchedule
- operator: Exists
- - key: CriticalAddonsOnly
- operator: Exists
- - effect: NoExecute
- operator: Exists
- volumes:
- - name: lib-modules
- hostPath:
- path: /lib/modules
- - name: kubeconfig
- configMap:
- name: kube-router-kubeconfig
- items:
- - key: kubeconfig.conf
- path: kubeconfig
- - name: xtables-lock
- hostPath:
- path: /run/xtables.lock
- type: FileOrCreate
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: kube-router
- namespace: kube-system
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: kube-router
- namespace: kube-system
-rules:
- - apiGroups:
- - ""
- resources:
- - namespaces
- - pods
- - services
- - nodes
- - endpoints
- verbs:
- - list
- - get
- - watch
- - apiGroups:
- - "networking.k8s.io"
- resources:
- - networkpolicies
- verbs:
- - list
- - get
- - watch
- - apiGroups:
- - extensions
- resources:
- - networkpolicies
- verbs:
- - get
- - list
- - watch
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: kube-router
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: kube-router
-subjects:
-- kind: ServiceAccount
- name: kube-router
- namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/node-local-dns.yml.j2
new file mode 100644
index 00000000..22ae6c22
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/node-local-dns.yml.j2
@@ -0,0 +1,213 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: kube-dns-upstream
+ namespace: kube-system
+ labels:
+ k8s-app: kube-dns
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+ kubernetes.io/name: "KubeDNSUpstream"
+spec:
+ ports:
+ - name: dns
+ port: 53
+ protocol: UDP
+ targetPort: 53
+ - name: dns-tcp
+ port: 53
+ protocol: TCP
+ targetPort: 53
+ selector:
+ k8s-app: kube-dns
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+data:
+ Corefile: |
+ {{ kubernetes.dns_domain | default('cluster.local') }}:53 {
+ errors
+ cache {
+ success 9984 30
+ denial 9984 5
+ }
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ health {{ kubernetes_nodelocal_dnscache_ip }}:8080
+ }
+ in-addr.arpa:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+ ip6.arpa:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+ .:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__UPSTREAM__SERVERS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ k8s-app: node-local-dns
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ selector:
+ matchLabels:
+ k8s-app: node-local-dns
+ template:
+ metadata:
+ labels:
+ k8s-app: node-local-dns
+ annotations:
+ prometheus.io/port: "9253"
+ prometheus.io/scrape: "true"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: node-local-dns
+ hostNetwork: true
+ dnsPolicy: Default # Don't use cluster DNS.
+ tolerations:
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ containers:
+ - name: node-cache
+ image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.20
+ resources:
+ requests:
+ cpu: 25m
+ memory: 5Mi
+ args: [ "-localip", "{{ kubernetes_nodelocal_dnscache_ip }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ]
+ securityContext:
+ capabilities:
+ add:
+ - NET_ADMIN
+ ports:
+ - containerPort: 53
+ name: dns
+ protocol: UDP
+ - containerPort: 53
+ name: dns-tcp
+ protocol: TCP
+ - containerPort: 9253
+ name: metrics
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ host: {{ kubernetes_nodelocal_dnscache_ip }}
+ path: /health
+ port: 8080
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ volumeMounts:
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - name: config-volume
+ mountPath: /etc/coredns
+ - name: kube-dns-config
+ mountPath: /etc/kube-dns
+ volumes:
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ - name: kube-dns-config
+ configMap:
+ name: kube-dns
+ optional: true
+ - name: config-volume
+ configMap:
+ name: node-local-dns
+ items:
+ - key: Corefile
+ path: Corefile.base
+---
+# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods.
+# We use this to expose metrics to Prometheus.
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/port: "9253"
+ prometheus.io/scrape: "true"
+ labels:
+ k8s-app: node-local-dns
+ name: node-local-dns
+ namespace: kube-system
+spec:
+ clusterIP: None
+ ports:
+ - name: metrics
+ port: 9253
+ targetPort: 9253
+ selector:
+ k8s-app: node-local-dns
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index d89dd32f..29b88847 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -11,8 +11,7 @@
- /etc/kubernetes/kubeadm-join.errors
- /etc/kubernetes/pki
- /etc/kubernetes/encryption
- - /etc/kubernetes/network-plugin.yml
- - /etc/kubernetes/node-local-dns.yml
+ - /etc/kubernetes/network-plugin
- /etc/kubernetes/addons
- /etc/kubernetes/decorations
- /etc/default/kubelet