6 files changed, 281 insertions, 57 deletions
diff --git a/inventory/group_vars/k8s-chtest/vars.yml b/inventory/group_vars/k8s-chtest/vars.yml
index 3ab3fe7a..709a6cdc 100644
--- a/inventory/group_vars/k8s-chtest/vars.yml
+++ b/inventory/group_vars/k8s-chtest/vars.yml
@@ -33,38 +33,39 @@ kubernetes_secrets:
 
 ### kubeguard
 #
-kubernetes_network_plugin: kubeguard
-kubernetes_network_plugin_replaces_kube_proxy: no
-kubernetes_kube_proxy_mode: ipvs
-kubernetes_enable_nodelocal_dnscache: yes
-kubeguard:
-  ## Mind that pod_ip_range and service_ip_range overlap and kubeguard
-  ## needs a /24 for addresses assigned to tunnel devices. This means that
-  ## node_indeces must be in the range between 1 and 191 -> 190 hosts possible
-  ##
-  ## hardcoded hostnames are not nice but if we do this via host_vars
-  ## the info is spread over multiple files and this makes it more diffcult
-  ## to find mistakes, so it is nicer to keep it in one place...
-  node_index:
-    ch-calypso: 125
-    ch-thetys: 126
-    ch-k8s-ctrl: 127
-kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ansible.utils.ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
+#kubernetes_network_plugin: kubeguard
+#kubernetes_network_plugin_replaces_kube_proxy: no
+#kubernetes_kube_proxy_mode: ipvs
+#kubernetes_enable_nodelocal_dnscache: yes
+#kubeguard:
+#  ## Mind that pod_ip_range and service_ip_range overlap and kubeguard
+#  ## needs a /24 for addresses assigned to tunnel devices. This means that
+#  ## node_indeces must be in the range between 1 and 191 -> 190 hosts possible
+#  ##
+#  ## hardcoded hostnames are not nice but if we do this via host_vars
+#  ## the info is spread over multiple files and this makes it more diffcult
+#  ## to find mistakes, so it is nicer to keep it in one place...
+#  node_index:
+#    ch-calypso: 125
+#    ch-thetys: 126
+#    ch-k8s-ctrl: 127
+#kubernetes_overlay_node_ip: "{{ kubernetes.pod_ip_range | ansible.utils.ipsubnet(kubernetes.pod_ip_range_size, kubeguard.node_index[inventory_hostname]) | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
 
 ### Cilium
 #
-#kubernetes_network_plugin: cilium
-#kubernetes_network_plugin_version: 1.13.2
-#kubernetes_network_plugin_replaces_kube_proxy: yes
-#kubernetes_enable_nodelocal_dnscache: no
-#kubernetes_cilium_config:
-#  ipam: kubernetes
-#  tunnel: disabled
-#  ipv4-native-routing-cidr: 192.168.28.0/24
-#  auto-direct-node-routes: yes
-#base_sysctl_config_user:
-#  net.ipv4.conf.all.rp_filter: 0
-#  net.ipv4.conf.default.rp_filter: 0
+kubernetes_network_plugin: cilium
+kubernetes_network_plugin_version: 1.13.2
+kubernetes_network_plugin_replaces_kube_proxy: yes
+kubernetes_enable_nodelocal_dnscache: yes
+kubernetes_cilium_config:
+  ipam: kubernetes
+  tunnel: disabled
+  ipv4-native-routing-cidr: 192.168.28.0/24
+  auto-direct-node-routes: yes
+  enable-local-redirect-policy: yes
+base_sysctl_config_user:
+  net.ipv4.conf.all.rp_filter: 0
+  net.ipv4.conf.default.rp_filter: 0
 
 ### None
 #
diff --git a/roles/kubernetes/kubeadm/base/tasks/net_cilium.yml b/roles/kubernetes/kubeadm/base/tasks/net_cilium.yml
index 8620ffea..d6b583e9 100644
--- a/roles/kubernetes/kubeadm/base/tasks/net_cilium.yml
+++ b/roles/kubernetes/kubeadm/base/tasks/net_cilium.yml
@@ -1,6 +1,6 @@
 ---
-- name: make sure kubernetes_enable_nodelocal_dnscache is not set
+- name: make sure local-redirect-policy is configured when node-local dnscache is enabled
   run_once: yes
   assert:
-    msg: "we currently don't support nodelocal dns-caches when using cilium, please set kubernetes_enable_nodelocal_dnscache to false."
-    that: not kubernetes_enable_nodelocal_dnscache
+    msg: "the nodelocal dns-cache needs cilium local-redirect policies to be enabled; please set kubernetes_cilium_config['enable-local-redirect-policy'] = true."
+    that: "(not kubernetes_enable_nodelocal_dnscache) or (('enable-local-redirect-policy' in kubernetes_cilium_config) and (kubernetes_cilium_config['enable-local-redirect-policy']))"
diff --git a/roles/kubernetes/kubeadm/control-plane/tasks/net_cilium.yml b/roles/kubernetes/kubeadm/control-plane/tasks/net_cilium.yml
index 4d535ed4..a25d5e63 100644
--- a/roles/kubernetes/kubeadm/control-plane/tasks/net_cilium.yml
+++ b/roles/kubernetes/kubeadm/control-plane/tasks/net_cilium.yml
@@ -31,22 +31,22 @@
     content: "{{ cilium_install.stderr }}\n"
     dest: /etc/kubernetes/network-plugin/install.errors
 
-## TODO: enable this once we have a working deployment
-# - name: install node-local dns cache
-#   when: kubernetes_enable_nodelocal_dnscache
-#   block:
-#   - name: generate node-local dns cache config
-#     template:
-#       src: net_cilium/node-local-dns.yml.j2
-#       dest: /etc/kubernetes/network-plugin/node-local-dns.yml
-
-#   - name: check if node-local dns cache is already installed
-#     check_mode: no
-#     command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin/node-local-dns.yml
-#     failed_when: false
-#     changed_when: false
-#     register: kube_node_local_dns_diff_result
-
-#   - name: install node-local dns cache
-#     when: kube_node_local_dns_diff_result.rc != 0
-#     command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin/node-local-dns.yml
+
+- name: install node-local dns cache
+  when: kubernetes_enable_nodelocal_dnscache
+  block:
+  - name: generate node-local dns cache config
+    template:
+      src: net_cilium/node-local-dns.yml.j2
+      dest: /etc/kubernetes/network-plugin/node-local-dns.yml
+
+  - name: check if node-local dns cache is already installed
+    check_mode: no
+    command: kubectl --kubeconfig /etc/kubernetes/admin.conf diff -f /etc/kubernetes/network-plugin/node-local-dns.yml
+    failed_when: false
+    changed_when: false
+    register: kube_node_local_dns_diff_result
+
+  - name: install node-local dns cache
+    when: kube_node_local_dns_diff_result.rc != 0
+    command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/network-plugin/node-local-dns.yml
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_cilium/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_cilium/node-local-dns.yml.j2
new file mode 100644
index 00000000..da9d50c7
--- /dev/null
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_cilium/node-local-dns.yml.j2
@@ -0,0 +1,227 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns-upstream
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    kubernetes.io/name: "KubeDNSUpstream"
+spec:
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+    targetPort: 53
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
+    targetPort: 53
+  selector:
+    k8s-app: kube-dns
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+data:
+  Corefile: |
+    {{ kubernetes.dns_domain | default('cluster.local') }}:53 {
+      errors
+      cache {
+        success 9984 30
+        denial 9984 5
+      }
+      reload
+      loop
+      bind 0.0.0.0
+      forward . __PILLAR__CLUSTER__DNS__ {
+        force_tcp
+      }
+      prometheus :9253
+      health :8080
+      }
+    in-addr.arpa:53 {
+      errors
+      cache 30
+      reload
+      loop
+      bind 0.0.0.0
+      forward . __PILLAR__CLUSTER__DNS__ {
+        force_tcp
+      }
+      prometheus :9253
+      }
+    ip6.arpa:53 {
+      errors
+      cache 30
+      reload
+      loop
+      bind 0.0.0.0
+      forward . __PILLAR__CLUSTER__DNS__ {
+        force_tcp
+      }
+      prometheus :9253
+      }
+    .:53 {
+      errors
+      cache 30
+      reload
+      loop
+      bind 0.0.0.0
+      forward . __PILLAR__UPSTREAM__SERVERS__
+      prometheus :9253
+      }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: node-local-dns
+  namespace: kube-system
+  labels:
+    k8s-app: node-local-dns
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 10%
+  selector:
+    matchLabels:
+      k8s-app: node-local-dns
+  template:
+    metadata:
+      labels:
+        k8s-app: node-local-dns
+      annotations:
+        policy.cilium.io/no-track-port: "53"
+        prometheus.io/port: "9253"
+        prometheus.io/scrape: "true"
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccountName: node-local-dns
+      dnsPolicy: Default # Don't use cluster DNS.
+      tolerations:
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      - effect: "NoExecute"
+        operator: "Exists"
+      - effect: "NoSchedule"
+        operator: "Exists"
+      containers:
+      - name: node-cache
+        image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.20
+        resources:
+          requests:
+            cpu: 25m
+            memory: 5Mi
+        args: [ "-localip", "{{ kubernetes_nodelocal_dnscache_ip }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream", "-skipteardown=true", "-setupinterface=false", "-setupiptables=false" ]
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9253
+          name: metrics
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 8080
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/coredns
+        - name: kube-dns-config
+          mountPath: /etc/kube-dns
+      volumes:
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
+      - name: config-volume
+        configMap:
+          name: node-local-dns
+          items:
+          - key: Corefile
+            path: Corefile.base
+---
+# A headless service is a service with a service IP but instead of load-balancing it will return the IPs of our associated Pods.
+# We use this to expose metrics to Prometheus.
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9253"
+    prometheus.io/scrape: "true"
+  labels:
+    k8s-app: node-local-dns
+  name: node-local-dns
+  namespace: kube-system
+spec:
+  clusterIP: None
+  ports:
+  - name: metrics
+    port: 9253
+    targetPort: 9253
+  selector:
+    k8s-app: node-local-dns
+---
+apiVersion: "cilium.io/v2"
+kind: CiliumLocalRedirectPolicy
+metadata:
+  name: "nodelocaldns"
+  namespace: kube-system
+spec:
+  redirectFrontend:
+    addressMatcher:
+      ip: "{{ kubernetes_nodelocal_dnscache_ip }}"
+      toPorts:
+      - port: "53"
+        name: dns
+        protocol: UDP
+      - port: "53"
+        name: dns-tcp
+        protocol: TCP
+  redirectBackend:
+    localEndpointSelector:
+      matchLabels:
+        k8s-app: node-local-dns
+    toPorts:
+    - port: "53"
+      name: dns
+      protocol: UDP
+    - port: "53"
+      name: dns-tcp
+      protocol: TCP
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/node-local-dns.yml.j2
index 22ae6c22..88f69a16 100644
--- a/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/node-local-dns.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kube-router/node-local-dns.yml.j2
@@ -97,9 +97,7 @@ data:
       reload
       loop
       bind {{ kubernetes_nodelocal_dnscache_ip }}
-      forward . __PILLAR__UPSTREAM__SERVERS__ {
-        force_tcp
-      }
+      forward . __PILLAR__UPSTREAM__SERVERS__
       prometheus :9253
       }
 ---
diff --git a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/node-local-dns.yml.j2
index 22ae6c22..88f69a16 100644
--- a/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/node-local-dns.yml.j2
+++ b/roles/kubernetes/kubeadm/control-plane/templates/net_kubeguard/node-local-dns.yml.j2
@@ -97,9 +97,7 @@ data:
       reload
       loop
      bind {{ kubernetes_nodelocal_dnscache_ip }}
-      forward . __PILLAR__UPSTREAM__SERVERS__ {
-        force_tcp
-      }
+      forward . __PILLAR__UPSTREAM__SERVERS__
      prometheus :9253
      }
 ---
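
Once this change is rolled out, the new resources can be sanity-checked from a control-plane node. The following is a minimal sketch, not part of the commit above; it only reuses the resource names defined in the new template and the admin.conf path the tasks already use:

- name: verify the node-local dns cache daemonset has rolled out
  command: kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system rollout status daemonset/node-local-dns --timeout=120s

- name: verify the cilium local-redirect policy was created
  command: kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system get ciliumlocalredirectpolicy nodelocaldns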
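For an end-to-end check that cilium really redirects queries for {{ kubernetes_nodelocal_dnscache_ip }} to the local cache pod, a lookup from a short-lived pod should still resolve cluster names. A hedged sketch; the busybox image and the default cluster.local domain are assumptions here, adjust to the cluster's kubernetes.dns_domain:

- name: smoke-test dns resolution through the node-local cache
  command: kubectl --kubeconfig /etc/kubernetes/admin.conf run dns-smoke-test --rm -i --image=busybox:1.36 --restart=Never -- nslookup kubernetes.default.svc.cluster.local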