Diffstat (limited to 'roles')
-rw-r--r--  roles/apps/coturn/templates/acmetool-reload.sh.j2                         |   2
-rwxr-xr-x  roles/apps/nextcloud/templates/nextcloud-occ.j2                           |   2
-rw-r--r--  roles/apps/nextcloud/templates/run-cron.sh.j2                             |   2
-rw-r--r--  roles/kubernetes/addons/metrics-server/tasks/main.yml                     |  10
-rw-r--r--  roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2  | 156
-rw-r--r--  roles/kubernetes/base/tasks/cri_containerd.yml                            |   2
-rw-r--r--  roles/kubernetes/base/tasks/cri_docker.yml                                |   9
-rw-r--r--  roles/kubernetes/base/tasks/main.yml                                      |   2
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/primary-master.yml                  |  17
-rw-r--r--  roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml               |   2
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2               |   5
-rw-r--r--  roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2           | 190
-rw-r--r--  roles/kubernetes/kubeadm/node/tasks/main.yml                              |   2
-rw-r--r--  roles/kubernetes/kubeadm/reset/tasks/main.yml                             |  13
-rw-r--r--  roles/kubernetes/standalone/templates/kubelet-config.yml.j2               |   1
-rw-r--r--  roles/kubernetes/standalone/templates/kubelet.service.override.j2         |   2
16 files changed, 406 insertions(+), 11 deletions(-)
diff --git a/roles/apps/coturn/templates/acmetool-reload.sh.j2 b/roles/apps/coturn/templates/acmetool-reload.sh.j2
index 1eff1ad3..c90c296d 100644
--- a/roles/apps/coturn/templates/acmetool-reload.sh.j2
+++ b/roles/apps/coturn/templates/acmetool-reload.sh.j2
@@ -18,7 +18,7 @@ while read name; do
install -m 0644 -o root -g coturn "$certdir/fullchain" "$SSL_D/cert.pem"
install -m 0640 -o root -g coturn "$certdir/privkey" "$SSL_D/privkey.pem"
-{% if kubernetes_cri_socket is defined %}
+{% if kubernetes_cri_socket %}
export CONTAINER_RUNTIME_ENDPOINT="{{ kubernetes_cri_socket }}"
{% endif %}
pod_id=$(crictl pods -q --state ready --name "^coturn-{{ coturn_realm }}-{{ ansible_nodename }}$")
diff --git a/roles/apps/nextcloud/templates/nextcloud-occ.j2 b/roles/apps/nextcloud/templates/nextcloud-occ.j2
index 571aecc4..7e2a51d4 100755
--- a/roles/apps/nextcloud/templates/nextcloud-occ.j2
+++ b/roles/apps/nextcloud/templates/nextcloud-occ.j2
@@ -9,7 +9,7 @@ if [ -z "$INST_NAME" ]; then
fi
set -eu
-{% if kubernetes_cri_socket is defined %}
+{% if kubernetes_cri_socket %}
export CONTAINER_RUNTIME_ENDPOINT="{{ kubernetes_cri_socket }}"
{% endif %}
diff --git a/roles/apps/nextcloud/templates/run-cron.sh.j2 b/roles/apps/nextcloud/templates/run-cron.sh.j2
index 9936bad1..755b7cb1 100644
--- a/roles/apps/nextcloud/templates/run-cron.sh.j2
+++ b/roles/apps/nextcloud/templates/run-cron.sh.j2
@@ -1,6 +1,6 @@
#!/bin/bash
-{% if kubernetes_cri_socket is defined %}
+{% if kubernetes_cri_socket %}
export CONTAINER_RUNTIME_ENDPOINT="{{ kubernetes_cri_socket }}"
{% endif %}
diff --git a/roles/kubernetes/addons/metrics-server/tasks/main.yml b/roles/kubernetes/addons/metrics-server/tasks/main.yml
new file mode 100644
index 00000000..e09106c1
--- /dev/null
+++ b/roles/kubernetes/addons/metrics-server/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: copy config for metrics-server
+ template:
+ src: "components.{{ kubernetes_metrics_server_version }}.yml.j2"
+ dest: /etc/kubernetes/metrics-server.yml
+
+- name: install metrics-server onto the cluster
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/metrics-server.yml
+ register: kube_metrics_server_apply_result
+ changed_when: (kube_metrics_server_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
diff --git a/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2 b/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2
new file mode 100644
index 00000000..1e3789bb
--- /dev/null
+++ b/roles/kubernetes/addons/metrics-server/templates/components.0.3.6.yml.j2
@@ -0,0 +1,156 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: system:aggregated-metrics-reader
+ labels:
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+rules:
+- apiGroups: ["metrics.k8s.io"]
+ resources: ["pods", "nodes"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: metrics-server:system:auth-delegator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: metrics-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: apiregistration.k8s.io/v1beta1
+kind: APIService
+metadata:
+ name: v1beta1.metrics.k8s.io
+spec:
+ service:
+ name: metrics-server
+ namespace: kube-system
+ group: metrics.k8s.io
+ version: v1beta1
+ insecureSkipTLSVerify: true
+ groupPriorityMinimum: 100
+ versionPriority: 100
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: metrics-server
+ namespace: kube-system
+ labels:
+ k8s-app: metrics-server
+spec:
+ selector:
+ matchLabels:
+ k8s-app: metrics-server
+ template:
+ metadata:
+ name: metrics-server
+ labels:
+ k8s-app: metrics-server
+ spec:
+ serviceAccountName: metrics-server
+ volumes:
+ # mount in tmp so we can safely use from-scratch images and/or read-only containers
+ - name: tmp-dir
+ emptyDir: {}
+ containers:
+ - name: metrics-server
+ image: k8s.gcr.io/metrics-server-amd64:v0.3.6
+ imagePullPolicy: IfNotPresent
+ args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-insecure-tls
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP
+ ports:
+ - name: main-port
+ containerPort: 4443
+ protocol: TCP
+ securityContext:
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ volumeMounts:
+ - name: tmp-dir
+ mountPath: /tmp
+ nodeSelector:
+ kubernetes.io/os: linux
+ kubernetes.io/arch: "amd64"
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: metrics-server
+ namespace: kube-system
+ labels:
+ kubernetes.io/name: "Metrics-server"
+ kubernetes.io/cluster-service: "true"
+spec:
+ selector:
+ k8s-app: metrics-server
+ ports:
+ - port: 443
+ protocol: TCP
+ targetPort: main-port
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: system:metrics-server
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ - nodes/stats
+ - namespaces
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: system:metrics-server
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:metrics-server
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
diff --git a/roles/kubernetes/base/tasks/cri_containerd.yml b/roles/kubernetes/base/tasks/cri_containerd.yml
index ff904ffc..549ccae0 100644
--- a/roles/kubernetes/base/tasks/cri_containerd.yml
+++ b/roles/kubernetes/base/tasks/cri_containerd.yml
@@ -1,7 +1,7 @@
---
- name: make sure the kubernetes_cri_socket variable is configured correctly
assert:
- msg: "The variable kubernetes_cri_socket is not configured to use containerd as container runtime."
+ msg: "The variable kubernetes_cri_socket is not configured correctly. You might need to move your host to the group kubernetes-cluster or standalone-kubelet!"
that:
- kubernetes_cri_socket == "unix:///run/containerd/containerd.sock"
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
index 67196f51..50558d70 100644
--- a/roles/kubernetes/base/tasks/cri_docker.yml
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -1,7 +1,14 @@
---
+
+- name: make sure the kubernetes_cri_socket variable is configured correctly
+ assert:
+ msg: "The variable kubernetes_cri_socket is not configured correctly. You might need to move your host to the group kubernetes-cluster or standalone-kubelet!"
+ that:
+ - not kubernetes_cri_socket
+
- name: disable bridge and iptables in docker daemon config
set_fact:
- docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'bridge': 'none', 'iptables': false}) }}"
+ docker_daemon_config: "{{ docker_daemon_config | default({}) | combine({'exec-opts': ['native.cgroupdriver=systemd'], 'bridge': 'none', 'iptables': false}) }}"
- name: install docker
include_role:
diff --git a/roles/kubernetes/base/tasks/main.yml b/roles/kubernetes/base/tasks/main.yml
index 721bc730..602266d5 100644
--- a/roles/kubernetes/base/tasks/main.yml
+++ b/roles/kubernetes/base/tasks/main.yml
@@ -37,7 +37,7 @@
create: yes
marker: "### {mark} ANSIBLE MANAGED BLOCK for crictl ###"
content: |
- {% if kubernetes_cri_socket is defined %}
+ {% if kubernetes_cri_socket %}
alias crictl="crictl --runtime-endpoint {{ kubernetes_cri_socket }}"
{% endif %}
{% if item == 'zsh' %}
diff --git a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
index 9bbe9ecc..f24e9ac1 100644
--- a/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/primary-master.yml
@@ -27,8 +27,8 @@
- name: initialize kubernetes master and store log
block:
- name: initialize kubernetes master
- command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
- # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
+ command: "kubeadm init --config /etc/kubernetes/kubeadm.config --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --skip-token-print"
+ # command: "kubeadm init --config /etc/kubernetes/kubeadm.config{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %}{% if kubernetes_network_plugin == 'kube-router' %} --skip-phases addon/kube-proxy{% endif %} --token '{{ kubeadm_token_generate.stdout }}' --token-ttl 42m --skip-token-print"
args:
creates: /etc/kubernetes/pki/ca.crt
register: kubeadm_init
@@ -104,6 +104,19 @@
loop: "{{ groups['_kubernetes_nodes_'] }}"
+## install node-local-dns
+
+- name: generate node-local dns cache config
+ template:
+ src: node-local-dns.yml.j2
+ dest: /etc/kubernetes/node-local-dns.yml
+
+- name: install node-local dns cache
+ command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /etc/kubernetes/node-local-dns.yml
+ register: kube_node_local_dns_apply_result
+ changed_when: (kube_node_local_dns_apply_result.stdout_lines | reject("regex", " unchanged$") | list | length) > 0
+
+
## Network Plugin
# - name: install network plugin
diff --git a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
index c00c3203..cb135adc 100644
--- a/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
+++ b/roles/kubernetes/kubeadm/master/tasks/secondary-masters.yml
@@ -28,7 +28,7 @@
- name: join kubernetes secondary master node and store log
block:
- name: join kubernetes secondary master node
- command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
+ command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }} --apiserver-bind-port 6442{% if kubernetes_overlay_node_ip is defined %} --apiserver-advertise-address {{ kubernetes_overlay_node_ip }}{% endif %}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}' --control-plane --certificate-key {{ kubeadm_upload_certs_key }}"
args:
creates: /etc/kubernetes/kubelet.conf
register: kubeadm_join
diff --git a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2 b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
index bb7f9a96..4b8548f7 100644
--- a/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
+++ b/roles/kubernetes/kubeadm/master/templates/kubeadm.config.j2
@@ -43,3 +43,8 @@ controllerManager:
scheduler: {}
dns:
type: CoreDNS
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+clusterDNS:
+- {{ kubernetes_nodelocal_dnscache_ip }}
diff --git a/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2 b/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2
new file mode 100644
index 00000000..210c551a
--- /dev/null
+++ b/roles/kubernetes/kubeadm/master/templates/node-local-dns.yml.j2
@@ -0,0 +1,190 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: kube-dns-upstream
+ namespace: kube-system
+ labels:
+ k8s-app: kube-dns
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+ kubernetes.io/name: "KubeDNSUpstream"
+spec:
+ ports:
+ - name: dns
+ port: 53
+ protocol: UDP
+ targetPort: 53
+ - name: dns-tcp
+ port: 53
+ protocol: TCP
+ targetPort: 53
+ selector:
+ k8s-app: kube-dns
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+data:
+ Corefile: |
+ {{ kubernetes.dns_domain | default('cluster.local') }}:53 {
+ errors
+ cache {
+ success 9984 30
+ denial 9984 5
+ }
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ health {{ kubernetes_nodelocal_dnscache_ip }}:8080
+ }
+ in-addr.arpa:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+ ip6.arpa:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__CLUSTER__DNS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+ .:53 {
+ errors
+ cache 30
+ reload
+ loop
+ bind {{ kubernetes_nodelocal_dnscache_ip }}
+ forward . __PILLAR__UPSTREAM__SERVERS__ {
+ force_tcp
+ }
+ prometheus :9253
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: node-local-dns
+ namespace: kube-system
+ labels:
+ k8s-app: node-local-dns
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 10%
+ selector:
+ matchLabels:
+ k8s-app: node-local-dns
+ template:
+ metadata:
+ labels:
+ k8s-app: node-local-dns
+ annotations:
+ prometheus.io/port: "9253"
+ prometheus.io/scrape: "true"
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: node-local-dns
+ hostNetwork: true
+ dnsPolicy: Default # Don't use cluster DNS.
+ tolerations:
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ - effect: "NoExecute"
+ operator: "Exists"
+ - effect: "NoSchedule"
+ operator: "Exists"
+ containers:
+ - name: node-cache
+ image: k8s.gcr.io/k8s-dns-node-cache:1.15.13
+ resources:
+ requests:
+ cpu: 25m
+ memory: 5Mi
+ args: [ "-localip", "{{ kubernetes_nodelocal_dnscache_ip }}", "-conf", "/etc/Corefile", "-upstreamsvc", "kube-dns-upstream" ]
+ securityContext:
+ privileged: true
+ ports:
+ - containerPort: 53
+ name: dns
+ protocol: UDP
+ - containerPort: 53
+ name: dns-tcp
+ protocol: TCP
+ - containerPort: 9253
+ name: metrics
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ host: {{ kubernetes_nodelocal_dnscache_ip }}
+ path: /health
+ port: 8080
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ volumeMounts:
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - name: config-volume
+ mountPath: /etc/coredns
+ - name: kube-dns-config
+ mountPath: /etc/kube-dns
+ volumes:
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ - name: kube-dns-config
+ configMap:
+ name: kube-dns
+ optional: true
+ - name: config-volume
+ configMap:
+ name: node-local-dns
+ items:
+ - key: Corefile
+ path: Corefile.base
diff --git a/roles/kubernetes/kubeadm/node/tasks/main.yml b/roles/kubernetes/kubeadm/node/tasks/main.yml
index 1d5178ea..655b1b18 100644
--- a/roles/kubernetes/kubeadm/node/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/node/tasks/main.yml
@@ -2,7 +2,7 @@
- name: join kubernetes node and store log
block:
- name: join kubernetes node
- command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket is defined %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
+ command: "kubeadm join 127.0.0.1:6443 --node-name {{ inventory_hostname }}{% if kubernetes_cri_socket %} --cri-socket {{ kubernetes_cri_socket }}{% endif %} --token '{{ kube_bootstrap_token }}' --discovery-token-ca-cert-hash '{{ kube_bootstrap_ca_cert_hash }}'"
args:
creates: /etc/kubernetes/kubelet.conf
register: kubeadm_join
diff --git a/roles/kubernetes/kubeadm/reset/tasks/main.yml b/roles/kubernetes/kubeadm/reset/tasks/main.yml
index 1e3539e1..c35e2bfc 100644
--- a/roles/kubernetes/kubeadm/reset/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/reset/tasks/main.yml
@@ -12,3 +12,16 @@
file:
path: "{{ item }}"
state: absent
+
+- name: get list of all cni configs
+ find:
+ paths: /etc/cni/net.d
+ register: kubeadm_reset_cni
+
+- name: remove all cni configs
+ loop: "{{ kubeadm_reset_cni.files }}"
+ loop_control:
+ label: "{{ item.path }}"
+ file:
+ path: "{{ item.path }}"
+ state: absent
diff --git a/roles/kubernetes/standalone/templates/kubelet-config.yml.j2 b/roles/kubernetes/standalone/templates/kubelet-config.yml.j2
index 4e6716eb..d6af0f24 100644
--- a/roles/kubernetes/standalone/templates/kubelet-config.yml.j2
+++ b/roles/kubernetes/standalone/templates/kubelet-config.yml.j2
@@ -19,6 +19,7 @@ maxPods: {{ kubernetes_standalone_max_pods }}
makeIPTablesUtilChains: false
hairpinMode: none
resolvConf: {{ kubernetes_standalone_resolv_conf }}
+cgroupDriver: systemd
enableControllerAttachDetach: false
featureGates:
RuntimeClass: false
diff --git a/roles/kubernetes/standalone/templates/kubelet.service.override.j2 b/roles/kubernetes/standalone/templates/kubelet.service.override.j2
index 3eb8b63d..fe8bfb4c 100644
--- a/roles/kubernetes/standalone/templates/kubelet.service.override.j2
+++ b/roles/kubernetes/standalone/templates/kubelet.service.override.j2
@@ -2,7 +2,7 @@
ExecStart=
ExecStart=/usr/bin/kubelet \
--config=/etc/kubernetes/kubelet.yml \
-{% if kubernetes_cri_socket is defined %}
+{% if kubernetes_cri_socket %}
--container-runtime=remote \
--container-runtime-endpoint={{ kubernetes_cri_socket }} \
{% endif %}