Diffstat (limited to 'roles/kubernetes/kubeadm/master')
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/main.yml                                    |  26
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2     |  12
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2     | 236
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2  |  13
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2  | 170
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2                   |  23
6 files changed, 459 insertions(+), 21 deletions(-)
diff --git a/roles/kubernetes/kubeadm/master/tasks/main.yml b/roles/kubernetes/kubeadm/master/tasks/main.yml
index 19037adc..04df760f 100644
--- a/roles/kubernetes/kubeadm/master/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/main.yml
@@ -31,14 +31,28 @@
set_fact:
kube_node_taints: "{% set node_info = kubectl_get_node.stdout | from_json %}{%if node_info.spec.taints is defined %}{{ node_info.spec.taints | map(attribute='key') | list }}{% endif %}"
-- name: remove taint from master node
- when: not kubernetes.dedicated_master and 'node-role.kubernetes.io/master' in kube_node_taints
- command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
+- name: remove taint from master/control-plane node
+ when: not kubernetes.dedicated_master
+ block:
+ - name: remove master taint from node
+ when: "'node-role.kubernetes.io/master' in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master-"
-- name: add taint for master node
- when: kubernetes.dedicated_master and 'node-role.kubernetes.io/master' not in kube_node_taints
- command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
+ - name: remove control-plane taint from node
+ when: "'node-role.kubernetes.io/control-plane' in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane-"
+- name: add taint to master/control-plane node
+ when: kubernetes.dedicated_master
+ block:
+ - name: add master taint to node
+ when: "'node-role.kubernetes.io/master' not in kube_node_taints"
+ command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/master='':NoSchedule"
+
+ ## TODO: enable this once all needed addons and workloads have tolerations set accordingly
+ # - name: add control-plane taint to node
+ # when: "'node-role.kubernetes.io/control-plane' not in kube_node_taints"
+ # command: "kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes {{ inventory_hostname }} node-role.kubernetes.io/control-plane='':NoSchedule"
- name: prepare kubectl (1/2)
file:
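Note on the commented-out control-plane taint task above: it stays disabled until every addon and workload that must keep running on a dedicated master also tolerates the newer taint key. As a rough sketch (not part of this commit), such a pod spec would carry a toleration mirroring the key and effect used by the commented-out kubectl command:

    tolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
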
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
index b06687d5..a2660db2 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.0.4.0.yml.j2
@@ -153,13 +153,11 @@ spec:
mountPath: /etc/kube-router
hostNetwork: true
tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
- - effect: NoSchedule
- key: node.kubernetes.io/not-ready
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
operator: Exists
volumes:
- name: lib-modules
@@ -189,7 +187,7 @@ metadata:
namespace: kube-system
---
kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-router
namespace: kube-system
@@ -224,7 +222,7 @@ rules:
- watch
---
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-router
roleRef:
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2
new file mode 100644
index 00000000..382164cb
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/net_kube-router/config.1.1.1.yml.j2
@@ -0,0 +1,236 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ cni-conf.json: |
+ {
+ "cniVersion":"0.3.0",
+ "name":"mynet",
+ "plugins":[
+ {
+ "name":"kubernetes",
+ "type":"bridge",
+ "bridge":"kube-bridge",
+ "isDefaultGateway":true,
+ "hairpinMode": true,
+ "ipam":{
+ "type":"host-local"
+ }
+ },
+ {
+ "type":"portmap",
+ "capabilities":{
+ "snat":true,
+ "portMappings":true
+ }
+ }
+ ]
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --run-router=true
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --bgp-graceful-restart=true
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ - --routes-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: KUBE_ROUTER_CNI_CONF_FILE
+ value: /etc/cni/net.d/10-kuberouter.conflist
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ initContainers:
+ - name: install-cni
+ image: busybox
+ imagePullPolicy: Always
+ command:
+ - /bin/sh
+ - -c
+ - set -e -x;
+ if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
+ if [ -f /etc/cni/net.d/*.conf ]; then
+ rm -f /etc/cni/net.d/*.conf;
+ fi;
+ TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
+ cp /etc/kube-router/cni-conf.json ${TMP};
+ mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
+ fi
+ volumeMounts:
+ - name: cni-conf-dir
+ mountPath: /etc/cni/net.d
+ - name: kube-router-cfg
+ mountPath: /etc/kube-router
+ hostNetwork: true
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: cni-conf-dir
+ hostPath:
+ path: /etc/cni/net.d
+ - name: kube-router-cfg
+ configMap:
+ name: kube-router-cfg
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
index 51bfdaae..e343f4a7 100644
--- a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
+++ b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.0.4.0.yml.j2
@@ -57,6 +57,7 @@ spec:
image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
imagePullPolicy: Always
args:
+ - --cluster-cidr={{ kubernetes.pod_ip_range }}
- --run-router=false
- --run-firewall=true
- --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
@@ -93,13 +94,11 @@ spec:
readOnly: false
hostNetwork: true
tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
- - effect: NoSchedule
- key: node.kubernetes.io/not-ready
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
operator: Exists
volumes:
- name: lib-modules
@@ -123,7 +122,7 @@ metadata:
namespace: kube-system
---
kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-router
namespace: kube-system
@@ -158,7 +157,7 @@ rules:
- watch
---
kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-router
roleRef:
diff --git a/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2 b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2
new file mode 100644
index 00000000..ec30d670
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/net_kubeguard/kube-router.1.1.1.yml.j2
@@ -0,0 +1,170 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kube-router-kubeconfig
+ namespace: kube-system
+ labels:
+ tier: node
+ k8s-app: kube-router
+data:
+ kubeconfig.conf: |
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - cluster:
+ certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ server: https://127.0.0.1:{{ kubernetes_api_lb_port | default('6443') }}
+ name: default
+ contexts:
+ - context:
+ cluster: default
+ namespace: default
+ user: default
+ name: default
+ current-context: default
+ users:
+ - name: default
+ user:
+ tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ name: kube-router
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: kube-router
+ tier: node
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-router
+ tier: node
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-router
+ serviceAccount: kube-router
+ containers:
+ - name: kube-router
+ image: docker.io/cloudnativelabs/kube-router:v{{ kubernetes_network_plugin_version }}
+ imagePullPolicy: Always
+ args:
+ - --run-router=false
+ - --run-firewall=true
+ - --run-service-proxy={{ kubernetes_network_plugin_replaces_kube_proxy | string | lower }}
+ - --bgp-graceful-restart=true
+ - --kubeconfig=/var/lib/kube-router/kubeconfig
+ - --hairpin-mode
+ - --iptables-sync-period=10s
+ - --ipvs-sync-period=10s
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 20244
+ initialDelaySeconds: 10
+ periodSeconds: 3
+ resources:
+ requests:
+ cpu: 250m
+ memory: 250Mi
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: lib-modules
+ mountPath: /lib/modules
+ readOnly: true
+ - name: kubeconfig
+ mountPath: /var/lib/kube-router
+ readOnly: true
+ - name: xtables-lock
+ mountPath: /run/xtables.lock
+ readOnly: false
+ hostNetwork: true
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ volumes:
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: kubeconfig
+ configMap:
+ name: kube-router-kubeconfig
+ items:
+ - key: kubeconfig.conf
+ path: kubeconfig
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-router
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ - services
+ - nodes
+ - endpoints
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - "networking.k8s.io"
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-router
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-router
+subjects:
+- kind: ServiceAccount
+ name: kube-router
+ namespace: kube-system
diff --git a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2
index 210c551a..d536d5a7 100644
--- a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2
+++ b/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2
@@ -140,7 +140,7 @@ spec:
operator: "Exists"
containers:
- name: node-cache
- image: k8s.gcr.io/k8s-dns-node-cache:1.15.13
+ image: k8s.gcr.io/dns/k8s-dns-node-cache:1.16.0
resources:
requests:
cpu: 25m
@@ -188,3 +188,24 @@ spec:
items:
- key: Corefile
path: Corefile.base
+---
+# A headless service is a Service without a cluster IP; instead of load-balancing, its DNS name resolves directly to the IPs of the associated Pods.
+# We use this to expose metrics to Prometheus.
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ prometheus.io/port: "9253"
+ prometheus.io/scrape: "true"
+ labels:
+ k8s-app: node-local-dns
+ name: node-local-dns
+ namespace: kube-system
+spec:
+ clusterIP: None
+ ports:
+ - name: metrics
+ port: 9253
+ targetPort: 9253
+ selector:
+ k8s-app: node-local-dns
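
Because the new node-local-dns Service is headless (clusterIP: None), resolving its name yields the IPs of the individual pods, which Prometheus can then scrape on port 9253. A quick sanity check after rollout could look like this (illustrative commands, not part of the commit):

    kubectl -n kube-system get endpoints node-local-dns
    kubectl -n kube-system run dns-check --rm -it --restart=Never --image=busybox:1.36 -- \
      nslookup node-local-dns.kube-system.svc.cluster.local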