Diffstat (limited to 'roles')
33 files changed, 561 insertions, 408 deletions
diff --git a/roles/apps/nextcloud/base/defaults/main.yml b/roles/apps/nextcloud/base/defaults/main.yml
new file mode 100644
index 00000000..1a8a6d52
--- /dev/null
+++ b/roles/apps/nextcloud/base/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+nextcloud_app_uid: "950"
+nextcloud_app_gid: "950"
+
+nextcloud_db_uid: "951"
+nextcloud_db_gid: "951"
+
+nextcloud_redis_uid: "952"
+nextcloud_redis_gid: "952"
diff --git a/roles/apps/nextcloud/base/tasks/main.yml b/roles/apps/nextcloud/base/tasks/main.yml
new file mode 100644
index 00000000..4c85a35c
--- /dev/null
+++ b/roles/apps/nextcloud/base/tasks/main.yml
@@ -0,0 +1,51 @@
+---
+- name: add group for nextcloud app
+  group:
+    name: nc-app
+    gid: "{{ nextcloud_app_gid }}"
+
+- name: add user for nextcloud app
+  user:
+    name: nc-app
+    uid: "{{ nextcloud_app_uid }}"
+    group: nc-app
+    password: "!"
+
+- name: add group for nextcloud db
+  group:
+    name: nc-db
+    gid: "{{ nextcloud_db_gid }}"
+
+- name: add user for nextcloud db
+  user:
+    name: nc-db
+    uid: "{{ nextcloud_db_uid }}"
+    group: nc-db
+    password: "!"
+
+- name: add group for nextcloud redis
+  group:
+    name: nc-redis
+    gid: "{{ nextcloud_redis_gid }}"
+
+- name: add user for nextcloud redis
+  user:
+    name: nc-redis
+    uid: "{{ nextcloud_redis_uid }}"
+    group: nc-redis
+    password: "!"
+
+- name: install template systemd unit for cron trigger
+  template:
+    src: cron@.service.j2
+    dest: /etc/systemd/system/nextcloud-cron@.service
+
+- name: install management scripts
+  loop:
+    - nextcloud-upgrade
+    - nextcloud-occ
+    - nextcloud-cron
+  template:
+    src: "{{ item }}.j2"
+    dest: "/usr/local/bin/{{ item }}"
+    mode: 0755
diff --git a/roles/apps/nextcloud/templates/cron@.service.j2 b/roles/apps/nextcloud/base/templates/cron@.service.j2
index d8cde0a3..d8cde0a3 100644
--- a/roles/apps/nextcloud/templates/cron@.service.j2
+++ b/roles/apps/nextcloud/base/templates/cron@.service.j2
diff --git a/roles/apps/nextcloud/templates/nextcloud-cron.j2 b/roles/apps/nextcloud/base/templates/nextcloud-cron.j2
index 355ae2c3..cf1d9715 100755
--- a/roles/apps/nextcloud/templates/nextcloud-cron.j2
+++ b/roles/apps/nextcloud/base/templates/nextcloud-cron.j2
@@ -16,4 +16,4 @@ if [ -z "$pod_id" ]; then echo "Pod not found"; exit 1; fi
 container_id=$(crictl ps -q --name '^nextcloud$' -p "$pod_id")
 if [ -z "$container_id" ]; then echo "Container not found"; exit 1; fi
 
-exec crictl exec "$container_id" php -f /var/www/html/cron.php
+exec crictl exec "$container_id" bash -c 'php -f /var/www/html/occ status -e; if [ $? -eq 0 ]; then php -f /var/www/html/cron.php; else echo "not running cron script when in maintenance mode"; fi'
diff --git a/roles/apps/nextcloud/templates/nextcloud-occ.j2 b/roles/apps/nextcloud/base/templates/nextcloud-occ.j2
index f12f1259..01383c95 100755
--- a/roles/apps/nextcloud/templates/nextcloud-occ.j2
+++ b/roles/apps/nextcloud/base/templates/nextcloud-occ.j2
@@ -16,4 +16,4 @@ if [ -z "$pod_id" ]; then echo "Pod not found"; exit 1; fi
 container_id=$(crictl ps -q --name '^nextcloud$' -p "$pod_id")
 if [ -z "$container_id" ]; then echo "Container not found"; exit 1; fi
 
-exec crictl exec -it "$container_id" php /var/www/html/occ $@
+exec crictl exec -it "$container_id" php -f /var/www/html/occ $@
diff --git a/roles/apps/nextcloud/templates/nextcloud-upgrade.j2 b/roles/apps/nextcloud/base/templates/nextcloud-upgrade.j2
index ffa912e8..f6edcb44 100755
--- a/roles/apps/nextcloud/templates/nextcloud-upgrade.j2
+++ b/roles/apps/nextcloud/base/templates/nextcloud-upgrade.j2
@@ -9,6 +9,13 @@ fi
 
 set -eu
 
+CURRENT_VERSION=$(nextcloud-occ "$INST_NAME" status -n --no-warnings --output plain | tr -d '\r' | awk -F : '/versionstring/ { print($2) }' | tr -d ' ')
+if [ "$CURRENT_VERSION" = "$VERSION" ]; then
+  echo "The current running version of nextcloud is already $CURRENT_VERSION, nothing to do here."
+  exit 0
+fi
+echo "will upgrade nextcloud instance $INST_NAME from '$CURRENT_VERSION' to '$VERSION'"
+
 K8S_CONFIG_HASH_D="/etc/kubernetes/config-hashes/"
 K8S_CONFIG_HASH_FILE="$K8S_CONFIG_HASH_D/nextcloud-$INST_NAME.yml"
 K8S_MANIFEST_D="/etc/kubernetes/manifests/"
@@ -41,16 +48,8 @@ else
   echo ""
 fi
 
-STORAGE_TYPE=$(findmnt -no fstype -T "$IMAGE_BUILD_D")
-if [ $STORAGE_TYPE == "zfs" ]; then
-  echo "*** creating ZFS snapshot"
-  echo ""
-
-  IMAGE_NAME_ESCAPED=${IMAGE_NAME/\//\\/}
-  CURRENT_VERSION=$(cat "$K8S_MANIFEST_FILE" | awk '/image: "'"$IMAGE_NAME_ESCAPED"':.*"/ { print($2) }' | tr -d '"' | cut -d ':' -f 2)
-  ZFS_VOLUME=$(findmnt -no source -T "$IMAGE_BUILD_D")
-  zfs snapshot "$ZFS_VOLUME@upgrade_$CURRENT_VERSION-to-$VERSION""_$(date '+%Y-%m-%m_%H:%M:%S')"
-fi
+INSTANCE_BASE_D=$(dirname "$IMAGE_BUILD_D")
+"$INSTANCE_BASE_D/upgrade.sh" prepare "$CURRENT_VERSION" "$VERSION"
 
 echo "*** Rebuilding config-hash file"
 echo ""
@@ -70,4 +69,6 @@ cat "$TMP_D/config-hash.yml" > "$K8S_CONFIG_HASH_FILE"
 cat "$TMP_D/manifest.yml" > "$K8S_MANIFEST_FILE"
 echo ""
 
+"$INSTANCE_BASE_D/upgrade.sh" finalize "$CURRENT_VERSION" "$VERSION"
+
 exit 0
diff --git a/roles/apps/nextcloud/defaults/main.yml b/roles/apps/nextcloud/defaults/main.yml
index ac87de94..631b0a0a 100644
--- a/roles/apps/nextcloud/defaults/main.yml
+++ b/roles/apps/nextcloud/defaults/main.yml
@@ -1,21 +1,9 @@
 ---
-nextcloud_app_uid: "950"
-nextcloud_app_gid: "950"
-
-nextcloud_db_uid: "951"
-nextcloud_db_gid: "951"
-
-nextcloud_redis_uid: "952"
-nextcloud_redis_gid: "952"
-
 # nextcloud_instances:
 #   example:
 #     new: yes
 #     version: 17.0.0
 #     port: 8100
-#     hostnames:
-#       - wolke.example.com
-#       - cloud.example.com
 #     storage:
 #       type: ...
 #     database:
@@ -26,3 +14,10 @@
 #       from: foo/bar:1.0  # optional
 #       dockerfile: |
 #         RUN apt-get install ...
+#     publish:
+#       zone: "{{ apps_publish_zone__foo }}"
+#       hostnames:
+#         - wolke.example.com
+#         - cloud.example.com
+#       tls:
+#         certificate_provider: ...
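For orientation, a hedged sketch of a host_vars entry matching the new schema above. The zone object itself is defined nowhere in this diff; its fields are inferred from how the roles dereference publish.zone (name, publisher, certificate_provider, certificate_ca_config), and all concrete values, as well as the storage/database sub-keys (elided in the role defaults), are assumptions:

apps_publish_zone__foo:                    # hypothetical zone object
  name: foo
  publisher: front01.example.com
  certificate_provider: openssl            # assumed provider role name
  certificate_ca_config:                   # shape depends on the x509 provider role
    common_name: apps-publish-foo-ca

nextcloud_instances:
  example:
    version: 17.0.0
    port: 8100
    storage:
      type: zfs                            # assumed storage type
    database:
      password: secret                     # assumed field
    publish:
      zone: "{{ apps_publish_zone__foo }}"
      hostnames:
        - wolke.example.com
        - cloud.example.com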
diff --git a/roles/apps/nextcloud/instance/tasks/main.yml b/roles/apps/nextcloud/instance/tasks/main.yml
index 373aa0a8..71a3ee79 100644
--- a/roles/apps/nextcloud/instance/tasks/main.yml
+++ b/roles/apps/nextcloud/instance/tasks/main.yml
@@ -47,6 +47,43 @@
     dest: "{{ nextcloud_instance_basepath }}/config/ports.conf"
 
 
+- name: create tls directory
+  file:
+    path: "{{ nextcloud_instance_basepath }}/tls"
+    owner: "{{ nextcloud_app_uid }}"
+    group: "{{ nextcloud_app_gid }}"
+    mode: 0500
+    state: directory
+
+- name: generate/install TLS certificates for publishing
+  vars:
+    x509_certificate_name: "nextcloud-{{ nextcloud_instance }}_publish"
+    x509_certificate_hostnames: []
+    x509_certificate_config:
+      ca: "{{ nextcloud_instances[nextcloud_instance].publish.zone.certificate_ca_config }}"
+      cert:
+        common_name: "nextcloud-{{ nextcloud_instance }}.{{ inventory_hostname }}"
+        extended_key_usage:
+          - serverAuth
+        extended_key_usage_critical: yes
+        create_subject_key_identifier: yes
+        not_after: +100w
+    x509_certificate_renewal:
+      install:
+        - dest: "{{ nextcloud_instance_basepath }}/tls/cert.pem"
+          src:
+            - cert
+          owner: "{{ nextcloud_app_uid }}"
+          mode: "0400"
+        - dest: "{{ nextcloud_instance_basepath }}/tls/key.pem"
+          src:
+            - key
+          owner: "{{ nextcloud_app_uid }}"
+          mode: "0400"
+  include_role:
+    name: "x509/{{ nextcloud_instances[nextcloud_instance].publish.zone.certificate_provider }}/cert"
+
+
 - name: build custom image
   # when: "'custom_image' in nextcloud_instances[nextcloud_instance]"
   include_tasks: custom-image.yml
@@ -71,6 +108,13 @@
     name: kubernetes/standalone/pod
 
 
+- name: install upgrade helper script
+  template:
+    src: upgrade.sh.j2
+    dest: "{{ nextcloud_instance_basepath }}/upgrade.sh"
+    mode: 0755
+
+
 - name: install systemd timer unit
   template:
     src: cron-.timer.j2
@@ -84,29 +128,44 @@
     enabled: yes
 
 
-- name: configure nginx vhost
+- name: configure nginx vhost for publishing
   vars:
-    nginx_vhost:
-      name: "nextcloud-{{ nextcloud_instance }}"
+    nginx_vhost__yaml: |
+      name: "nextcloud-{{ nextcloud_instance }}.{{ inventory_hostname }}"
       template: generic
+      {% if 'tls' in nextcloud_instances[nextcloud_instance].publish %}
       tls:
-        certificate_provider: "{{ acme_client }}"
-      hostnames: "{{ nextcloud_instances[nextcloud_instance].hostnames }}"
+        {{ nextcloud_instances[nextcloud_instance].publish.tls | to_nice_yaml(indent=2) | indent(2) }}
+      {% endif %}
+      hostnames:
+      {% for hostname in nextcloud_instances[nextcloud_instance].publish.hostnames %}
+        - {{ hostname }}
+      {% endfor %}
       locations:
         '/':
-          proxy_pass: "http://127.0.0.1:{{ nextcloud_instances[nextcloud_instance].port }}"
+          {% if nextcloud_instances[nextcloud_instance].publish.zone.publisher == inventory_hostname %}
+          proxy_pass: "https://127.0.0.1:{{ nextcloud_instances[nextcloud_instance].port }}"
+          {% else %}
+          proxy_pass: "https://{{ ansible_default_ipv4.address }}:{{ nextcloud_instances[nextcloud_instance].port }}"
+          {% endif %}
           proxy_redirect:
-            - redirect: "http://$host/"
-              replacement: "https://$host/"
-            - redirect: "http://$host:8080/"
+            - redirect: "https://$host:8080/"
               replacement: "https://$host/"
+          proxy_ssl:
+            trusted_certificate: "/etc/ssl/apps-publish-{{ nextcloud_instances[nextcloud_instance].publish.zone.name }}/apps-publish-{{ nextcloud_instances[nextcloud_instance].publish.zone.name }}-ca-crt.pem"
+            verify: "on"
+            name: "nextcloud-{{ nextcloud_instance }}.{{ inventory_hostname }}"
+            protocols: "TLSv1.3"
       extra_directives: |-
         client_max_body_size 0;
         types {
          text/javascript js mjs;
        }
+    nginx_vhost: "{{ nginx_vhost__yaml | from_yaml }}"
   include_role:
     name: nginx/vhost
+  apply:
+    delegate_to: "{{ nextcloud_instances[nextcloud_instance].publish.zone.publisher }}"
 
 
 # TODO:
@@ -118,7 +177,7 @@ prompt: |
       *************
       ** {{ nextcloud_instance }} is a new instance
       **
-      ** Go to https://{{ nextcloud_instances[nextcloud_instance].hostnames[0] }} and finalize the
+      ** Go to https://{{ nextcloud_instances[nextcloud_instance].publish.hostnames[0] }} and finalize the
       ** installation. After that run the following commands:
       **
       **  $ nextcloud-occ {{ nextcloud_instance }} config:system:set default_phone_region --value='at'
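A note on the indirection above: nginx_vhost__yaml is a Jinja-templated string that is rendered first and only then parsed back into a dict via from_yaml, which is what allows {% if %}/{% for %} blocks inside what is structurally a YAML value. As a hedged illustration, for a hypothetical instance "example" on host nc1.example.com (default IPv4 192.0.2.10) whose zone publisher is a different machine, and with no per-instance tls override set, the resulting nginx_vhost dict would be roughly:

name: "nextcloud-example.nc1.example.com"
template: generic
hostnames:
  - wolke.example.com
  - cloud.example.com
locations:
  '/':
    proxy_pass: "https://192.0.2.10:8100"
    proxy_redirect:
      - redirect: "https://$host:8080/"
        replacement: "https://$host/"
    proxy_ssl:
      trusted_certificate: "/etc/ssl/apps-publish-foo/apps-publish-foo-ca-crt.pem"
      verify: "on"
      name: "nextcloud-example.nc1.example.com"
      protocols: "TLSv1.3"
extra_directives: |-
  client_max_body_size 0;
  types {
    text/javascript js mjs;
  }

Because the task is applied with delegate_to set to the zone's publisher, this vhost lands on the publishing edge host, which then proxies over verified TLS to the apache inside the pod.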
diff --git a/roles/apps/nextcloud/instance/templates/apache-site.conf.j2 b/roles/apps/nextcloud/instance/templates/apache-site.conf.j2
index a52a7fc5..8df06113 100644
--- a/roles/apps/nextcloud/instance/templates/apache-site.conf.j2
+++ b/roles/apps/nextcloud/instance/templates/apache-site.conf.j2
@@ -1,3 +1,7 @@
+IncludeOptional mods-available/socache_shmcb.load
+IncludeOptional mods-available/ssl.load
+IncludeOptional mods-available/ssl.conf
+
 <VirtualHost *:8080>
     ServerAdmin webmaster@localhost
     DocumentRoot /var/www/html
@@ -5,6 +9,12 @@
     UseCanonicalName Off
     UseCanonicalPhysicalPort Off
 
+    ServerName nextcloud-{{ nextcloud_instance }}.{{ inventory_hostname }}
+    SSLEngine on
+    SSLCertificateFile "/etc/ssl/publish/cert.pem"
+    SSLCertificateKeyFile "/etc/ssl/publish/key.pem"
+    SSLProtocol TLSv1.3
+
     ErrorLog ${APACHE_LOG_DIR}/error.log
     CustomLog ${APACHE_LOG_DIR}/access.log combined
 </VirtualHost>
diff --git a/roles/apps/nextcloud/instance/templates/pod-spec-with-mariadb.yml.j2 b/roles/apps/nextcloud/instance/templates/pod-spec-with-mariadb.yml.j2
index 42d76757..c1a4f2ea 100644
--- a/roles/apps/nextcloud/instance/templates/pod-spec-with-mariadb.yml.j2
+++ b/roles/apps/nextcloud/instance/templates/pod-spec-with-mariadb.yml.j2
@@ -13,9 +13,7 @@ containers:
 {% if 'new' in nextcloud_instances[nextcloud_instance] and nextcloud_instances[nextcloud_instance].new %}
   env:
   - name: NEXTCLOUD_TRUSTED_DOMAINS
-    value: "{{ nextcloud_instances[nextcloud_instance].hostnames | join(' ') }}"
-  - name: OVERWRITEPROTOCOL
-    value: "https"
+    value: "{{ nextcloud_instances[nextcloud_instance].publish.hostnames | join(' ') }}"
   - name: MYSQL_HOST
     value: 127.0.0.1
   - name: MYSQL_DATABASE
@@ -36,6 +34,9 @@ containers:
     mountPath: /etc/apache2/ports.conf
     subPath: ports.conf
     readOnly: true
+  - name: tls
+    mountPath: /etc/ssl/publish
+    readOnly: true
   ports:
   - containerPort: 8080
     hostPort: {{ nextcloud_instances[nextcloud_instance].port }}
@@ -91,6 +92,10 @@ volumes:
   hostPath:
     path: "{{ nextcloud_instance_basepath }}/config/"
     type: Directory
+- name: tls
+  hostPath:
+    path: "{{ nextcloud_instance_basepath }}/tls/"
+    type: Directory
 - name: nextcloud
   hostPath:
     path: "{{ nextcloud_instance_basepath }}/nextcloud"
diff --git a/roles/apps/nextcloud/instance/templates/upgrade.sh.j2 b/roles/apps/nextcloud/instance/templates/upgrade.sh.j2
new file mode 100644
index 00000000..62f6641e
--- /dev/null
+++ b/roles/apps/nextcloud/instance/templates/upgrade.sh.j2
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+set -e
+
+if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
+  echo "Usage: $0 (prepare|finalize) <old-version> <new-version>"
+  exit 1
+fi
+
+COMMAND="$1"
+OLD_VERSION="$2"
+NEW_VERSION="$3"
+POD_NAME="{{ nextcloud_instance }}-$(hostname)"
+
+maintenance_mode() {
+  POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q)
+  CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q)
+  crictl exec "$CONTAINER_ID" php -f /var/www/html/occ maintenance:mode "$1"
+}
+
+wait_for_cronjobs() {
+  POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q)
+  CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q)
+  crictl exec "$CONTAINER_ID" bash -c 'echo -n "waiting for running cron script "; while [ -n "$(pgrep -a php | grep cron.php)" ]; do echo -n "."; sleep 1; done; echo ""'
+}
+
+wait_for_upgrade_complete() {
+  NEW_VERSION="$1"
+
+  set +e
+  echo -n "waiting for new version to be ready "
+  while true; do
+    POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q)
+    if [ -z $POD_ID ]; then continue; fi
+    CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q)
+    if [ -z $CONTAINER_ID ]; then continue; fi
+    STATUS_OUTPUT=$(crictl exec "$CONTAINER_ID" php -f /var/www/html/occ status -n --no-warnings --output plain)
+    if [ $? -eq 0 ]; then
+      RUNNING_VERSION=$(echo "$STATUS_OUTPUT" | awk -F : '/versionstring/ { print($2) }' | tr -d ' ')
+      if [ "$RUNNING_VERSION" = "$NEW_VERSION" ]; then
+        break
+      fi
+      echo -n "."
+    fi
+    sleep 1
+  done
+  echo ""
+  set -e
+  crictl exec "$CONTAINER_ID" bash -c 'echo -n "waiting for apache to start "; while [ -z "$(pgrep apache2)" ]; do echo -n "."; sleep 1; done; echo ""'
+}
+
+storage_snapshot() {
+  OLD_VERSION="$1"
+  NEW_VERSION="$2"
+
+{% if nextcloud_instances[nextcloud_instance].storage.type == 'zfs' %}
+  ZFS_VOLUME=$(findmnt -no source -T "{{ nextcloud_instance_basepath }}")
+  echo "creating snapshot for zfs volume: $ZFS_VOLUME"
+  zfs snapshot "$ZFS_VOLUME@upgrade_$OLD_VERSION-to-$NEW_VERSION""_$(date '+%Y-%m-%d_%H:%M:%S')"
+{% endif %}
+}
+
+case "$COMMAND" in
+  prepare)
+    maintenance_mode --on
+    wait_for_cronjobs
+    storage_snapshot "$OLD_VERSION" "$NEW_VERSION"
+    ;;
+  finalize)
+    wait_for_upgrade_complete "$NEW_VERSION"
+    maintenance_mode --off
+    ;;
+  *)
+    echo "unknown command: $COMMAND, must be prepare or finalize"
+    exit 1
+    ;;
+esac
diff --git a/roles/apps/nextcloud/meta/main.yml b/roles/apps/nextcloud/meta/main.yml
new file mode 100644
index 00000000..c00c47ce
--- /dev/null
+++ b/roles/apps/nextcloud/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - role: apps/nextcloud/base
diff --git a/roles/apps/nextcloud/tasks/main.yml b/roles/apps/nextcloud/tasks/main.yml
index 69bbba6a..6e81f351 100644
--- a/roles/apps/nextcloud/tasks/main.yml
+++ b/roles/apps/nextcloud/tasks/main.yml
@@ -1,55 +1,4 @@
 ---
-- name: add group for nextcloud app
-  group:
-    name: nc-app
-    gid: "{{ nextcloud_app_gid }}"
-
-- name: add user for nextcloud app
-  user:
-    name: nc-app
-    uid: "{{ nextcloud_app_uid }}"
-    group: nc-app
-    password: "!"
-
-- name: add group for nextcloud db
-  group:
-    name: nc-db
-    gid: "{{ nextcloud_db_gid }}"
-
-- name: add user for nextcloud db
-  user:
-    name: nc-db
-    uid: "{{ nextcloud_db_uid }}"
-    group: nc-db
-    password: "!"
-
-- name: add group for nextcloud redis
-  group:
-    name: nc-redis
-    gid: "{{ nextcloud_redis_gid }}"
-
-- name: add user for nextcloud redis
-  user:
-    name: nc-redis
-    uid: "{{ nextcloud_redis_uid }}"
-    group: nc-redis
-    password: "!"
-
-- name: install template systemd unit for cron trigger
-  template:
-    src: cron@.service.j2
-    dest: /etc/systemd/system/nextcloud-cron@.service
-
-- name: install management scripts
-  loop:
-    - nextcloud-upgrade
-    - nextcloud-occ
-    - nextcloud-cron
-  template:
-    src: "{{ item }}.j2"
-    dest: "/usr/local/bin/{{ item }}"
-    mode: 0755
-
 - name: instance specific tasks
   loop: "{{ nextcloud_instances | list }}"
   loop_control:
diff --git a/roles/apps/onlyoffice/defaults/main.yml b/roles/apps/onlyoffice/defaults/main.yml
index 1ea4773a..7e33368d 100644
--- a/roles/apps/onlyoffice/defaults/main.yml
+++ b/roles/apps/onlyoffice/defaults/main.yml
@@ -1,30 +1,21 @@
 ---
-# onlyoffice_app_uid: "960"
-# onlyoffice_app_gid: "960"
-
-onlyoffice_db_uid: "961"
-onlyoffice_db_gid: "961"
-
-# onlyoffice_amqp_uid: "962"
-# onlyoffice_amqp_gid: "962"
-
-# onlyoffice_base_path: /srv/onlyoffice
-
-# onlyoffice_zfs:
-#   pool: storage
-#   name: onlyoffice
-#   properties:
-#     compression: lz4
-
 # onlyoffice_instances:
 #   example:
 #     version: 6.2.1.24
 #     port: 8600
 #     hostname: office.example.com
 #     jwt_secret: very-secure-password
+#     storage:
+#       type: ...
 #     database:
 #       version: 9.5.25
 #       password: secret
 #     amqp:
 #       version: 3.8.14
 #       password: secret
+#     publish:
+#       zone: "{{ apps_publish_zone__foo }}"
+#       hostnames:
+#         - office.example.com
+#       tls:
+#         certificate_provider: ...
diff --git a/roles/apps/onlyoffice/instance/tasks/main.yml b/roles/apps/onlyoffice/instance/tasks/main.yml
new file mode 100644
index 00000000..2ca6026d
--- /dev/null
+++ b/roles/apps/onlyoffice/instance/tasks/main.yml
@@ -0,0 +1,88 @@
+---
+- name: prepare storage volume
+  vars:
+    storage_volume: "{{ onlyoffice_instances[onlyoffice_instance].storage }}"
+  include_role:
+    name: "storage/{{ onlyoffice_instances[onlyoffice_instance].storage.type }}/volume"
+
+- set_fact:
+    onlyoffice_instance_basepath: "{{ storage_volume_mountpoint }}"
+
+- name: create onlyoffice database subdirectory
+  file:
+    path: "{{ onlyoffice_instance_basepath }}/postgres"
+    state: directory
+
+- name: create onlyoffice tls subdirectory
+  file:
+    path: "{{ onlyoffice_instance_basepath }}/tls"
+    state: directory
+    mode: 0700
+
+- name: generate/install TLS certificates for publishing
+  vars:
+    x509_certificate_name: "onlyoffice-{{ onlyoffice_instance }}_publish"
+    x509_certificate_hostnames: []
+    x509_certificate_config:
+      ca: "{{ onlyoffice_instances[onlyoffice_instance].publish.zone.certificate_ca_config }}"
+      cert:
+        common_name: "onlyoffice-{{ onlyoffice_instance }}.{{ inventory_hostname }}"
+        extended_key_usage:
+          - serverAuth
+        extended_key_usage_critical: yes
+        create_subject_key_identifier: yes
+        not_after: +100w
+    x509_certificate_renewal:
+      install:
+        - dest: "{{ onlyoffice_instance_basepath }}/tls/onlyoffice.crt"
+          src:
+            - cert
+          mode: "0400"
+        - dest: "{{ onlyoffice_instance_basepath }}/tls/onlyoffice.key"
+          src:
+            - key
+          mode: "0400"
+  include_role:
+    name: "x509/{{ onlyoffice_instances[onlyoffice_instance].publish.zone.certificate_provider }}/cert"
+
+- name: install pod manifest
+  vars:
+    kubernetes_standalone_pod:
+      name: "onlyoffice-{{ onlyoffice_instance }}"
+      spec: "{{ lookup('template', 'pod-spec.yml.j2') }}"
+      mode: "0600"
+  include_role:
+    name: kubernetes/standalone/pod
+
+- name: configure nginx vhost for publishing
+  vars:
+    nginx_vhost__yaml: |
+      name: "onlyoffice-{{ onlyoffice_instance }}.{{ inventory_hostname }}"
+      template: generic
+      {% if 'tls' in onlyoffice_instances[onlyoffice_instance].publish %}
+      tls:
+        {{ onlyoffice_instances[onlyoffice_instance].publish.tls | to_nice_yaml(indent=2) | indent(2) }}
+      {% endif %}
+      hostnames:
+      {% for hostname in onlyoffice_instances[onlyoffice_instance].publish.hostnames %}
+        - {{ hostname }}
+      {% endfor %}
+      locations:
+        '/':
+          {% if onlyoffice_instances[onlyoffice_instance].publish.zone.publisher == inventory_hostname %}
+          proxy_pass: "https://127.0.0.1:{{ onlyoffice_instances[onlyoffice_instance].port }}"
+          {% else %}
+          proxy_pass: "https://{{ ansible_default_ipv4.address }}:{{ onlyoffice_instances[onlyoffice_instance].port }}"
+          {% endif %}
+          proxy_ssl:
+            trusted_certificate: "/etc/ssl/apps-publish-{{ onlyoffice_instances[onlyoffice_instance].publish.zone.name }}/apps-publish-{{ onlyoffice_instances[onlyoffice_instance].publish.zone.name }}-ca-crt.pem"
+            verify: "on"
+            name: "onlyoffice-{{ onlyoffice_instance }}.{{ inventory_hostname }}"
+            protocols: "TLSv1.2 TLSv1.3"
+      extra_directives: |-
+        client_max_body_size 0;
+    nginx_vhost: "{{ nginx_vhost__yaml | from_yaml }}"
+  include_role:
+    name: nginx/vhost
+  apply:
+    delegate_to: "{{ onlyoffice_instances[onlyoffice_instance].publish.zone.publisher }}"
diff --git a/roles/apps/onlyoffice/instance/templates/pod-spec.yml.j2 b/roles/apps/onlyoffice/instance/templates/pod-spec.yml.j2
new file mode 100644
index 00000000..ec70f8c1
--- /dev/null
+++ b/roles/apps/onlyoffice/instance/templates/pod-spec.yml.j2
@@ -0,0 +1,75 @@
+terminationGracePeriodSeconds: 120
+containers:
+- name: documentserver
+  image: "onlyoffice/documentserver:{{ onlyoffice_instances[onlyoffice_instance].version }}"
+  resources:
+    limits:
+      memory: "4Gi"
+  env:
+  - name: "DB_TYPE"
+    value: "postgres"
+  - name: "DB_HOST"
+    value: "127.0.0.1"
+  - name: "DB_PORT"
+    value: "5432"
+  - name: "DB_NAME"
+    value: "onlyoffice"
+  - name: "DB_USER"
+    value: "onlyoffice"
+  - name: "DB_PWD"
+    value: "{{ onlyoffice_instances[onlyoffice_instance].database.password }}"
+  - name: "AMQP_TYPE"
+    value: "rabbitmq"
+  - name: "AMQP_URI"
+    value: "amqp://onlyoffice:{{ onlyoffice_instances[onlyoffice_instance].amqp.password }}@127.0.0.1:5672"
+  - name: "JWT_ENABLED"
+    value: "true"
+  - name: "JWT_SECRET"
+    value: "{{ onlyoffice_instances[onlyoffice_instance].jwt_secret }}"
+  volumeMounts:
+  - name: tls
+    mountPath: /var/www/onlyoffice/Data/certs/
+    readOnly: true
+  ports:
+  - containerPort: 443
+    hostPort: {{ onlyoffice_instances[onlyoffice_instance].port }}
+    hostIP: 127.0.0.1
+
+- name: postgresql
+  image: "postgres:{{ onlyoffice_instances[onlyoffice_instance].database.version }}"
+  args:
+    - postgres
+    - -c
+    - listen_addresses=127.0.0.1
+  env:
+  - name: "POSTGRES_DB"
+    value: "onlyoffice"
+  - name: "POSTGRES_USER"
+    value: "onlyoffice"
+  - name: "POSTGRES_PASSWORD"
+    value: "{{ onlyoffice_instances[onlyoffice_instance].database.password }}"
+  volumeMounts:
+  - name: postgres
+    mountPath: /var/lib/postgresql/data
+
+- name: rabbitmq
+  image: "rabbitmq:{{ onlyoffice_instances[onlyoffice_instance].amqp.version }}"
+  env:
+  - name: "RABBITMQ_NODENAME"
+    value: "rabbit@localhost"
+  - name: "RABBITMQ_NODE_IP_ADDRESS"
+    value: "127.0.0.1"
+  - name: "RABBITMQ_DEFAULT_USER"
+    value: "onlyoffice"
+  - name: "RABBITMQ_DEFAULT_PASS"
+    value: "{{ onlyoffice_instances[onlyoffice_instance].amqp.password }}"
+
+volumes:
+- name: tls
+  hostPath:
+    path: "{{ onlyoffice_instance_basepath }}/tls"
+    type: Directory
+- name: postgres
+  hostPath:
+    path: "{{ onlyoffice_instance_basepath }}/postgres"
+    type: Directory
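Analogous to nextcloud, a complete onlyoffice instance definition under the new schema could look like the sketch below. The concrete values are copied from the commented example in the role defaults; apps_publish_zone__foo is the same hypothetical zone object sketched earlier, and the storage sub-key is assumed:

onlyoffice_instances:
  example:
    version: 6.2.1.24
    port: 8600
    jwt_secret: very-secure-password
    storage:
      type: zfs                            # assumed storage type
    database:
      version: 9.5.25
      password: secret
    amqp:
      version: 3.8.14
      password: secret
    publish:
      zone: "{{ apps_publish_zone__foo }}"
      hostnames:
        - office.example.com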
diff --git a/roles/apps/onlyoffice/tasks/main.yml b/roles/apps/onlyoffice/tasks/main.yml
index 960e811b..a42ee589 100644
--- a/roles/apps/onlyoffice/tasks/main.yml
+++ b/roles/apps/onlyoffice/tasks/main.yml
@@ -1,153 +1,7 @@
 ---
-- name: create zfs datasets
-  when: onlyoffice_zfs is defined
-  block:
-    - name: create zfs base dataset
-      zfs:
-        name: "{{ onlyoffice_zfs.pool }}/{{ onlyoffice_zfs.name }}"
-        state: present
-        extra_zfs_properties: "{{ onlyoffice_zfs.properties | dehumanize_zfs_properties | default(omit) }}"
-
-    - name: create zfs volumes for instances
-      loop: "{{ onlyoffice_instances | dict2items }}"
-      loop_control:
-        label: "{{ item.key }} ({{ (item.value.zfs_properties | default({})).items() | map('join', '=') | join(', ') }})"
-      zfs:
-        name: "{{ onlyoffice_zfs.pool }}/{{ onlyoffice_zfs.name }}/{{ item.key }}"
-        state: present
-        extra_zfs_properties: "{{ item.value.zfs_properties | dehumanize_zfs_properties | default(omit) }}"
-
-    - name: configure onlyoffice base bath
-      set_fact:
-        onlyoffice_base_path: "{{ (zfs_pools[onlyoffice_zfs.pool].mountpoint, onlyoffice_zfs.name) | path_join }}"
-
-
-- name: create instance subdirectories
-  when: onlyoffice_zfs is not defined
+- name: instance specific tasks
   loop: "{{ onlyoffice_instances | list }}"
-  file:
-    path: "{{ onlyoffice_base_path }}/{{ item }}"
-    state: directory
-
-
-# TODO: run documentserver components as non-root
-# - name: add group for onlyoffice app
-#   group:
-#     name: oo-app
-#     gid: "{{ onlyoffice_app_gid }}"
-
-# - name: add user for onlyoffice app
-#   user:
-#     name: oo-app
-#     uid: "{{ onlyoffice_app_uid }}"
-#     group: oo-app
-#     password: "!"
-
-# - name: create onlyoffice app subdirectory
-#   loop: "{{ onlyoffice_instances | list }}"
-#   file:
-#     path: "{{ onlyoffice_base_path }}/{{ item }}/onlyoffice"
-#     owner: "{{ onlyoffice_app_uid }}"
-#     group: "{{ onlyoffice_app_gid }}"
-#     state: directory
-
-
-- name: add group for onlyoffice db
-  group:
-    name: oo-db
-    gid: "{{ onlyoffice_db_gid }}"
-
-- name: add user for onlyoffice db
-  user:
-    name: oo-db
-    uid: "{{ onlyoffice_db_uid }}"
-    group: oo-db
-    password: "!"
-
-- name: create onlyoffice database subdirectory
-  loop: "{{ onlyoffice_instances | dict2items}}"
   loop_control:
-    label: "{{ item.key }}"
-  file:
-    path: "{{ onlyoffice_base_path }}/{{ item.key }}/postgres"
-    owner: "{{ onlyoffice_db_uid }}"
-    group: "{{ onlyoffice_db_gid }}"
-    state: directory
-
-
-# TODO: run documentserver components as non-root
-# - name: add group for onlyoffice aqmp
-#   group:
-#     name: oo-aqmp
-#     gid: "{{ onlyoffice_aqmp_gid }}"
-
-# - name: add user for onlyoffice aqmp
-#   user:
-#     name: oo-aqmp
-#     uid: "{{ onlyoffice_aqmp_uid }}"
-#     group: oo-aqmp
-#     password: "!"
-
-# - name: create onlyoffice aqmp subdirectory
-#   loop: "{{ onlyoffice_instances | list }}"
-#   file:
-#     path: "{{ onlyoffice_base_path }}/{{ item }}/onlyoffice"
-#     owner: "{{ onlyoffice_aqmp_uid }}"
-#     group: "{{ onlyoffice_aqmp_gid }}"
-#     state: directory
-
-# TODO: AQMP config?
-# - name: create onlyoffice rabbitmq subdirectory
-#   loop: "{{ onlyoffice_instances | dict2items}}"
-#   loop_control:
-#     label: "{{ item.key }}"
-#   file:
-#     path: "{{ onlyoffice_base_path }}/{{ item.key }}/rabbitmq"
-#     state: directory
-
-# - name: install rabbitmq config snipped
-#   loop: "{{ onlyoffice_instances | dict2items}}"
-#   loop_control:
-#     label: "{{ item.key }}"
-#   copy:
-#     dest: "{{ onlyoffice_base_path }}/{{ item.key }}/rabbitmq/config"
-#     content: |
-#       management.tcp.ip = 127.0.0.1
-
-
-- name: install pod manifest
-  loop: "{{ onlyoffice_instances | dict2items }}"
-  loop_control:
-    label: "{{ item.key }}"
-  vars:
-    kubernetes_standalone_pod:
-      name: "onlyoffice-{{ item.key }}"
-      spec: "{{ lookup('template', 'pod-spec.yml.j2') }}"
-      mode: "0600"
-# TODO: AQMP config?
-#      config_hash_items:
-#        - path: "{{ onlyoffice_base_path }}/{{ item.key }}/rabbitmq/config"
-#          properties:
-#            - checksum
-  include_role:
-    name: kubernetes/standalone/pod
-
-- name: configure nginx vhost
-  loop: "{{ onlyoffice_instances | dict2items }}"
-  loop_control:
-    label: "{{ item.key }}"
-  vars:
-    nginx_vhost:
-      name: "onlyoffice-{{ item.key }}"
-      template: generic
-      tls:
-        certificate_provider: "{{ acme_client }}"
-      hostnames:
-        - "{{ item.value.hostname }}"
-      locations:
-        '/':
-          proxy_pass: "http://127.0.0.1:{{ item.value.port }}"
-      extra_directives: |-
-        client_max_body_size 0;
+    loop_var: onlyoffice_instance
   include_role:
-    name: nginx/vhost
+    name: apps/onlyoffice/instance
diff --git a/roles/apps/onlyoffice/templates/pod-spec.yml.j2 b/roles/apps/onlyoffice/templates/pod-spec.yml.j2
deleted file mode 100644
index 620e0d18..00000000
--- a/roles/apps/onlyoffice/templates/pod-spec.yml.j2
+++ /dev/null
@@ -1,102 +0,0 @@
-{# TODO:
-securityContext:
-  allowPrivilegeEscalation: false
-#}
-terminationGracePeriodSeconds: 120
-containers:
-{# TODO: only listen to localhost #}
-- name: documentserver
-  image: "onlyoffice/documentserver:{{ item.value.version }}"
-  resources:
-    limits:
-      memory: "4Gi"
-{# TODO:
-  securityContext:
-    allowPrivilegeEscalation: false
-    runAsUser: {{ onlyoffice_amqp_uid }}
-    runAsGroup: {{ onlyoffice_amqp_gid }}
-#}
-  env:
-  - name: "DB_TYPE"
-    value: "postgres"
-  - name: "DB_HOST"
-    value: "127.0.0.1"
-  - name: "DB_PORT"
-    value: "5432"
-  - name: "DB_NAME"
-    value: "onlyoffice"
-  - name: "DB_USER"
-    value: "onlyoffice"
-  - name: "DB_PWD"
-    value: "{{ item.value.database.password }}"
-  - name: "AMQP_TYPE"
-    value: "rabbitmq"
-  - name: "AMQP_URI"
-    value: "amqp://onlyoffice:{{ item.value.amqp.password }}@127.0.0.1:5672"
-  - name: "JWT_ENABLED"
-    value: "true"
-  - name: "JWT_SECRET"
-    value: "{{ item.value.jwt_secret }}"
-  ports:
-  - containerPort: 80
-    hostPort: {{ item.value.port }}
-    hostIP: 127.0.0.1
-
-- name: postgresql
-  image: "postgres:{{ item.value.database.version }}"
-  args:
-    - postgres
-    - -c
-    - listen_addresses=127.0.0.1
-  securityContext:
-    allowPrivilegeEscalation: false
-    runAsUser: {{ onlyoffice_db_uid }}
-    runAsGroup: {{ onlyoffice_db_gid }}
-  env:
-  - name: "POSTGRES_DB"
-    value: "onlyoffice"
-  - name: "POSTGRES_USER"
-    value: "onlyoffice"
-  - name: "POSTGRES_PASSWORD"
-    value: "{{ item.value.database.password }}"
-  volumeMounts:
-  - name: postgres
-    mountPath: /var/lib/postgresql/data
-
-{# TODO: only listen to localhost #}
-- name: rabbitmq
-  image: "rabbitmq:{{ item.value.amqp.version }}"
-{# TODO:
-  securityContext:
-    allowPrivilegeEscalation: false
-    runAsUser: {{ onlyoffice_amqp_uid }}
-    runAsGroup: {{ onlyoffice_amqp_gid }}
-#}
-  env:
-  - name: "RABBITMQ_NODENAME"
-    value: "rabbit@localhost"
-  - name: "RABBITMQ_NODE_IP_ADDRESS"
-    value: "127.0.0.1"
-  - name: "RABBITMQ_DEFAULT_USER"
-    value: "onlyoffice"
-  - name: "RABBITMQ_DEFAULT_PASS"
-    value: "{{ item.value.amqp.password }}"
-{# TODO: AQMP config?
-  volumeMounts:
-  - name: rabbitmq
-    mountPath: /etc/rabbitmq/conf.d/k8s.conf
-    subPath: config
-    readOnly: true
-#}
-
-volumes:
-- name: postgres
-  hostPath:
-    path: "{{ onlyoffice_base_path }}/{{ item.key }}/postgres"
-    type: Directory
-{# TODO: AQMP config?
-- name: rabbitmq
-  hostPath:
-    path: "{{ onlyoffice_base_path }}/{{ item.key }}/rabbitmq"
-    type: Directory
-#}
diff --git a/roles/core/base/tasks/Debian.yml b/roles/core/base/tasks/Debian.yml
index 4d2abc17..43f1876e 100644
--- a/roles/core/base/tasks/Debian.yml
+++ b/roles/core/base/tasks/Debian.yml
@@ -156,6 +156,15 @@
     state: "{{ base_enable_fstrim | ternary('started', 'stopped') }}"
     enabled: "{{ base_enable_fstrim }}"
 
+- name: configure timezone
+  timezone:
+    name: "{{ base_timezone }}"
+  register: etc_localtime
+
+- name: make sure legacy /etc/timezone is in sync with /etc/localtime
+  when: etc_localtime is changed
+  command: dpkg-reconfigure -f noninteractive tzdata
+
 - name: remove cloud-init bullshit
   loop:
     - /var/log/cloud-init.log
diff --git a/roles/core/base/tasks/OpenBSD.yml b/roles/core/base/tasks/OpenBSD.yml
index fbc06379..7ff4f324 100644
--- a/roles/core/base/tasks/OpenBSD.yml
+++ b/roles/core/base/tasks/OpenBSD.yml
@@ -13,3 +13,7 @@
   openbsd_pkg:
     name: "{{ base_packages_extra }}"
     state: present
+
+- name: configure timezone
+  timezone:
+    name: "{{ base_timezone }}"
diff --git a/roles/core/base/tasks/main.yml b/roles/core/base/tasks/main.yml
index fe4a396c..a70bc7ad 100644
--- a/roles/core/base/tasks/main.yml
+++ b/roles/core/base/tasks/main.yml
@@ -34,7 +34,3 @@
   copy:
     src: "{{ global_files_dir }}/common/htoprc"
     dest: "{{ item }}/.config/htop/"
-
-- name: configure timezone
-  timezone:
-    name: "{{ base_timezone }}"
diff --git a/roles/greenbone/server/templates/docker-compose-22.4.yml.j2 b/roles/greenbone/server/templates/docker-compose-22.4.yml.j2
index 85742836..8c007ee9 100644
--- a/roles/greenbone/server/templates/docker-compose-22.4.yml.j2
+++ b/roles/greenbone/server/templates/docker-compose-22.4.yml.j2
@@ -1,65 +1,65 @@
 services:
   vulnerability-tests:
-    image: greenbone/vulnerability-tests
+    image: registry.community.greenbone.net/community/vulnerability-tests
     environment:
       STORAGE_PATH: /var/lib/openvas/22.04/vt-data/nasl
     volumes:
       - vt_data_vol:/mnt
 
   notus-data:
-    image: greenbone/notus-data
+    image: registry.community.greenbone.net/community/notus-data
     volumes:
       - notus_data_vol:/mnt
 
   scap-data:
-    image: greenbone/scap-data
+    image: registry.community.greenbone.net/community/scap-data
    volumes:
       - scap_data_vol:/mnt
 
   cert-bund-data:
-    image: greenbone/cert-bund-data
+    image: registry.community.greenbone.net/community/cert-bund-data
     volumes:
       - cert_data_vol:/mnt
 
   dfn-cert-data:
-    image: greenbone/dfn-cert-data
+    image: registry.community.greenbone.net/community/dfn-cert-data
     volumes:
       - cert_data_vol:/mnt
     depends_on:
       - cert-bund-data
 
   data-objects:
-    image: greenbone/data-objects
+    image: registry.community.greenbone.net/community/data-objects
     volumes:
       - data_objects_vol:/mnt
 
   report-formats:
-    image: greenbone/report-formats
+    image: registry.community.greenbone.net/community/report-formats
     volumes:
       - data_objects_vol:/mnt
     depends_on:
       - data-objects
 
   gpg-data:
-    image: greenbone/gpg-data
+    image: registry.community.greenbone.net/community/gpg-data
     volumes:
       - gpg_data_vol:/mnt
 
   redis-server:
-    image: greenbone/redis-server
+    image: registry.community.greenbone.net/community/redis-server
     restart: on-failure
     volumes:
       - redis_socket_vol:/run/redis/
 
   pg-gvm:
-    image: greenbone/pg-gvm:stable
+    image: registry.community.greenbone.net/community/pg-gvm:stable
     restart: on-failure
     volumes:
       - psql_data_vol:/var/lib/postgresql
       - psql_socket_vol:/var/run/postgresql
 
   gvmd:
-    image: greenbone/gvmd:stable
+    image: registry.community.greenbone.net/community/gvmd:stable
     restart: on-failure
     volumes:
       - gvmd_data_vol:/var/lib/gvm
@@ -86,7 +86,7 @@
         condition: service_completed_successfully
 
   gsa:
-    image: greenbone/gsa:stable
+    image: registry.community.greenbone.net/community/gsa:stable
     restart: on-failure
     ports:
       - 127.0.0.1:9392:80
@@ -94,9 +94,73 @@
     volumes:
       - gvmd_socket_vol:/run/gvmd
     depends_on:
       - gvmd
+
+  # Sets log level of openvas to the set LOG_LEVEL within the env
+  # and changes log output to /var/log/openvas instead of /var/log/gvm
+  # to reduce the likelihood of unwanted log interference
+  configure-openvas:
+    image: registry.community.greenbone.net/community/openvas-scanner:stable
+    volumes:
+      - openvas_data_vol:/mnt
+      - openvas_log_data_vol:/var/log/openvas
+    command:
+      - /bin/sh
+      - -c
+      - |
+        printf "table_driven_lsc = yes\nopenvasd_server = http://openvasd:80\n" > /mnt/openvas.conf
+        sed "s/level=.*/level=INFO/" /etc/openvas/openvas_log.conf | sed 's/gvm/openvas/' > /mnt/openvas_log.conf
+        chmod 644 /mnt/openvas.conf
+        chmod 644 /mnt/openvas_log.conf
+        touch /var/log/openvas/openvas.log
+        chmod 666 /var/log/openvas/openvas.log
+
+  # shows logs of openvas
+  openvas:
+    image: registry.community.greenbone.net/community/openvas-scanner:stable
+    restart: on-failure
+    volumes:
+      - openvas_data_vol:/etc/openvas
+      - openvas_log_data_vol:/var/log/openvas
+    command:
+      - /bin/sh
+      - -c
+      - |
+        cat /etc/openvas/openvas.conf
+        tail -f /var/log/openvas/openvas.log
+    depends_on:
+      configure-openvas:
+        condition: service_completed_successfully
+
+  openvasd:
+    image: registry.community.greenbone.net/community/openvas-scanner:stable
+    restart: on-failure
+    environment:
+      # `service_notus` is set to disable everything but notus,
+      # if you want to utilize openvasd directly remove `OPENVASD_MODE`
+      OPENVASD_MODE: service_notus
+      GNUPGHOME: /etc/openvas/gnupg
+      LISTENING: 0.0.0.0:80
+    volumes:
+      - openvas_data_vol:/etc/openvas
+      - openvas_log_data_vol:/var/log/openvas
+      - gpg_data_vol:/etc/openvas/gnupg
+      - notus_data_vol:/var/lib/notus
+    # enable port forwarding when you want to use the http api from your host machine
+    # ports:
+    #   - 127.0.0.1:3000:80
+    depends_on:
+      vulnerability-tests:
+        condition: service_completed_successfully
+      configure-openvas:
+        condition: service_completed_successfully
+      gpg-data:
+        condition: service_completed_successfully
+    networks:
+      default:
+        aliases:
+          - openvasd
 
   ospd-openvas:
-    image: greenbone/ospd-openvas:stable
+    image: registry.community.greenbone.net/community/ospd-openvas:stable
     restart: on-failure
     hostname: ospd-openvas.local
     cap_add:
@@ -111,8 +175,6 @@
       "-f",
       "--config",
       "/etc/gvm/ospd-openvas.conf",
-      "--mqtt-broker-address",
-      "mqtt-broker",
       "--notus-feed-dir",
       "/var/lib/notus/advisories",
       "-m",
@@ -124,6 +186,8 @@
       - notus_data_vol:/var/lib/notus
       - ospd_openvas_socket_vol:/run/ospd
       - redis_socket_vol:/run/redis/
+      - openvas_data_vol:/etc/openvas/
+      - openvas_log_data_vol:/var/log/openvas
     depends_on:
       redis-server:
         condition: service_started
@@ -131,32 +195,11 @@
         condition: service_completed_successfully
       vulnerability-tests:
         condition: service_completed_successfully
-
-  mqtt-broker:
-    restart: on-failure
-    image: greenbone/mqtt-broker
-    networks:
-      default:
-        aliases:
-          - mqtt-broker
-          - broker
-
-  notus-scanner:
-    restart: on-failure
-    image: greenbone/notus-scanner:stable
-    volumes:
-      - notus_data_vol:/var/lib/notus
-      - gpg_data_vol:/etc/openvas/gnupg
-    environment:
-      NOTUS_SCANNER_MQTT_BROKER_ADDRESS: mqtt-broker
-      NOTUS_SCANNER_PRODUCTS_DIRECTORY: /var/lib/notus/products
-    depends_on:
-      - mqtt-broker
-      - gpg-data
-      - vulnerability-tests
+      configure-openvas:
+        condition: service_completed_successfully
 
   gvm-tools:
-    image: greenbone/gvm-tools
+    image: registry.community.greenbone.net/community/gvm-tools
     volumes:
       - gvmd_socket_vol:/run/gvmd
       - ospd_openvas_socket_vol:/run/ospd
@@ -177,3 +220,5 @@
   gvmd_socket_vol:
   ospd_openvas_socket_vol:
   redis_socket_vol:
+  openvas_data_vol:
+  openvas_log_data_vol:
diff --git a/roles/installer/debian/preseed/defaults/main.yml b/roles/installer/debian/preseed/defaults/main.yml
index 1a8b5b67..ffc9521c 100644
--- a/roles/installer/debian/preseed/defaults/main.yml
+++ b/roles/installer/debian/preseed/defaults/main.yml
@@ -10,7 +10,7 @@ debian_preseed_locales:
 debian_preseed_keyboard_layout: de
 debian_preseed_keyboard_variant: nodeadkeys
 
-debian_preseed_timezone: Europe/Vienna
+debian_preseed_timezone: "{{ base_timezone | default('Europe/Vienna') }}"
 
 # debian_preseed_force_net_ifnames_policy: path
 debian_preseed_no_netplan: no
diff --git a/roles/installer/openbsd/autoinstall/defaults/main.yml b/roles/installer/openbsd/autoinstall/defaults/main.yml
index b166c191..4d8fd865 100644
--- a/roles/installer/openbsd/autoinstall/defaults/main.yml
+++ b/roles/installer/openbsd/autoinstall/defaults/main.yml
@@ -14,3 +14,5 @@ obsd_autoinstall_file_sets:
   #- xfont
   #- xserv
   #- xshare
+
+obsd_autoinstall_timezone: "{{ base_timezone | default('Europe/Vienna') }}"
diff --git a/roles/installer/openbsd/autoinstall/templates/auto_install.conf.j2 b/roles/installer/openbsd/autoinstall/templates/auto_install.conf.j2
index 8c28d6e6..10fa17be 100644
--- a/roles/installer/openbsd/autoinstall/templates/auto_install.conf.j2
+++ b/roles/installer/openbsd/autoinstall/templates/auto_install.conf.j2
@@ -23,7 +23,7 @@
 Setup a user = no
 Start sshd(8) by default = yes
 Allow root ssh login = prohibit-password
-What timezone are you in = Europe/Vienna
+What timezone are you in = {{ obsd_autoinstall_timezone }}
 
 Location of sets = cd0
 Pathname to the sets = /
diff --git a/roles/installer/raspios/image/defaults/main.yml b/roles/installer/raspios/image/defaults/main.yml
index 3f6ab3a3..35a76e38 100644
--- a/roles/installer/raspios/image/defaults/main.yml
+++ b/roles/installer/raspios/image/defaults/main.yml
@@ -13,5 +13,5 @@ raspios_keep_boot_dir_mounted: no
 #   line: 'dtparam=i2c_vc=on'
 
 raspios_locale: en_US.UTF-8
-raspios_timezone: Europe/Vienna
+raspios_timezone: "{{ base_timezone | default('Europe/Vienna') }}"
 raspios_keyboard_layout: de
diff --git a/roles/installer/ubuntu/autoinstall/defaults/main.yml b/roles/installer/ubuntu/autoinstall/defaults/main.yml
index 50a35ae1..16d08a19 100644
--- a/roles/installer/ubuntu/autoinstall/defaults/main.yml
+++ b/roles/installer/ubuntu/autoinstall/defaults/main.yml
@@ -6,7 +6,7 @@ ubuntu_autoinstall_locale: en_US
 ubuntu_autoinstall_keyboard_layout: de
 ubuntu_autoinstall_keyboard_variant: nodeadkeys
 
-ubuntu_autoinstall_timezone: Europe/Vienna
+ubuntu_autoinstall_timezone: "{{ base_timezone | default('Europe/Vienna') }}"
 
 # ubuntu_autoinstall_kernel_image:
 ubuntu_autoinstall_virtual_machine: no
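The timezone handling above now chains back to one inventory-wide variable: core/base applies it at runtime via the timezone module, while the preseed/autoinstall/raspios installer roles fall back to the same value at install time. A minimal sketch of the intended wiring (only the variable name comes from the diff; its placement is an assumption):

# group_vars/all.yml (assumed location)
base_timezone: Europe/Vienna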
diff --git a/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2 b/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2
index 1de70b70..24cce9b7 100644
--- a/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2
+++ b/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2
@@ -237,6 +237,9 @@ autoinstall:
       dpkg -l | grep "^rc" | awk "{ print(\$2) }" | xargs -r dpkg -P
       sed '/^PasswordAuthentication /d' -i '/etc/ssh/sshd_config'
       rm -f '/etc/ssh/sshd_config.d/50-cloud-init.conf'
+      swapoff -a
+      sed -e '/^\/swapfile/d' -e '/^\/swap\.img/d' -i /etc/fstab
+      rm -f /swapfile /swap.img
       rm -f /root/post-cleanup.sh
 {% if ubuntu_autoinstall_poweroff_when_done %}
       poweroff
@@ -262,7 +265,6 @@
 {% endfor %}
 
   late-commands:
-    - curtin in-target --target=/target -- swapoff -a; sed -e '/^\/swapfile/d' -e '/^\/swap\.img/d' -i /etc/fstab; rm -f /swapfile /swap.img
 {% if ansible_port is defined %}
     - curtin in-target --target=/target -- sed -e 's/^\s*#*\s*Port\s\s*[0-9][0-9]*$/Port {{ ansible_port }}/' -i /etc/ssh/sshd_config
     - curtin in-target --target=/target -- bash -c "mkdir -p /etc/systemd/system/ssh.socket.d; echo -e '[Socket]\nListenStream=\nListenStream={{ ansible_port }}' > /etc/systemd/system/ssh.socket.d/port.conf"
@@ -272,7 +274,7 @@
     - curtin in-target --target=/target -- apt-get -y -q purge systemd-oomd
 {% endif %}
 {% if (install_codename | ubuntu_release_compare('>=', 'noble')) %}
-    - curtin in-target --target=/target -- apt-get -y -q purge ubuntu-kernel-accessories ubuntu-pro-client lxd-installer
+    - curtin in-target --target=/target -- apt-get -y -q purge ubuntu-kernel-accessories ubuntu-pro-client lxd-installer unminimize
 {% endif %}
 {% if ubuntu_autoinstall_desktop is undefined %}
     - curtin in-target --target=/target -- apt-mark manual iputils-ping isc-dhcp-client netcat-openbsd netplan.io sudo
diff --git a/roles/kubernetes/base/tasks/cri_docker.yml b/roles/kubernetes/base/tasks/cri_docker.yml
index 6d114b87..252eb7e6 100644
--- a/roles/kubernetes/base/tasks/cri_docker.yml
+++ b/roles/kubernetes/base/tasks/cri_docker.yml
@@ -24,6 +24,7 @@
         - "native.cgroupdriver=systemd"
       bridge: "none"
       iptables: false
+      ip6tables: false
       log-driver: "json-file"
       log-opts:
         max-size: "10M"
diff --git a/roles/monitoring/prometheus/server/defaults/main/main.yml b/roles/monitoring/prometheus/server/defaults/main/main.yml
index 25cffa5b..dd290e9e 100644
--- a/roles/monitoring/prometheus/server/defaults/main/main.yml
+++ b/roles/monitoring/prometheus/server/defaults/main/main.yml
@@ -75,5 +75,27 @@ prometheus_server_web_listen_address: 127.0.0.1:9090
 #   - node
 #   - blackbox
 
+prometheus_server_remote_write_receiver: no
+
+# prometheus_server_remote_write_destinations:
+#   example:
+#     url: "https://mon.example.com/prometheus/api/v1/write"
+#     basic_auth:
+#       username: remote
+#       password_file: /etc/prometheus/prometheus-remote.secret
+#     tls_config:
+#       ca: |
+#         -----BEGIN CERTIFICATE-----
+#         ...
+#         -----END CERTIFICATE-----
+#     write_relabel_configs:
+#       - source_labels: ['__name__']
+#         regex: 'go_gc_.*'
+#         action: 'drop'
+#       - source_labels: ['job']
+#         regex: 'alertmanager'
+#         action: 'drop'
+
 # prometheus_server_secret_files:
 #   user: secret
+#   remote: othersecret
diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
index 4db6cd17..5cb27264 100644
--- a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
+++ b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
@@ -93,13 +93,13 @@
       description: "Prometheus has no target in service discovery\n  VALUE = {{ '{{' }} $value {{ '}}' }}\n  LABELS = {{ '{{' }} $labels {{ '}}' }}"
 
   - alert: PrometheusTargetScrapingSlow
-    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
+    expr: prometheus_target_interval_length_seconds{quantile="0.9"} / on (interval, instance, job) prometheus_target_interval_length_seconds{quantile="0.5"} > 1.05
     for: 5m
     labels:
       severity: warning
     annotations:
       summary: Prometheus target scraping slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
-      description: "Prometheus is scraping exporters slowly\n  VALUE = {{ '{{' }} $value {{ '}}' }}\n  LABELS = {{ '{{' }} $labels {{ '}}' }}"
+      description: "Prometheus is scraping exporters slowly since it exceeded the requested interval time\n  VALUE = {{ '{{' }} $value {{ '}}' }}\n  LABELS = {{ '{{' }} $labels {{ '}}' }}"
 
   - alert: PrometheusLargeScrape
     expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
diff --git a/roles/monitoring/prometheus/server/templates/prometheus.service.j2 b/roles/monitoring/prometheus/server/templates/prometheus.service.j2
index e65e9425..86c30cbd 100644
--- a/roles/monitoring/prometheus/server/templates/prometheus.service.j2
+++ b/roles/monitoring/prometheus/server/templates/prometheus.service.j2
@@ -6,7 +6,7 @@ After=time-sync.target
 [Service]
 Restart=on-failure
 User=prometheus
-ExecStart=/usr/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/var/lib/prometheus/metrics2/ --storage.tsdb.retention.time={{ prometheus_server_retention }}{% if prometheus_server_web_external_url is defined %} --web.external-url={{ prometheus_server_web_external_url }}{% endif %}{% if prometheus_server_web_route_prefix is defined %} --web.route-prefix={{ prometheus_server_web_route_prefix }}{% endif %}{% if prometheus_server_auth_users is defined %} --web.config.file=/etc/prometheus/prometheus-web.yml{% endif %} --web.listen-address={{ prometheus_server_web_listen_address }}
+ExecStart=/usr/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/var/lib/prometheus/metrics2/ --storage.tsdb.retention.time={{ prometheus_server_retention }}{% if prometheus_server_web_external_url is defined %} --web.external-url={{ prometheus_server_web_external_url }}{% endif %}{% if prometheus_server_web_route_prefix is defined %} --web.route-prefix={{ prometheus_server_web_route_prefix }}{% endif %}{% if prometheus_server_auth_users is defined %} --web.config.file=/etc/prometheus/prometheus-web.yml{% endif %}{% if prometheus_server_remote_write_receiver %} --web.enable-remote-write-receiver{% endif %} --web.listen-address={{ prometheus_server_web_listen_address }}
 ExecReload=/bin/kill -HUP $MAINPID
 TimeoutStopSec=20s
 SendSIGKILL=no
diff --git a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
index 85adfa52..d72a4815 100644
--- a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
+++ b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
@@ -96,3 +96,10 @@ scrape_configs:
         - targets: ['{{ config.url }}']
 {% endfor %}
 {% endfor %}
+{% if prometheus_server_remote_write_destinations is defined %}
+
+remote_write:
+{% for name, config in prometheus_server_remote_write_destinations.items() %}
+  - {{ config | combine({'name': name }) | to_nice_yaml(indent=2) | indent(4) }}
+{% endfor %}
+{% endif %}
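Putting both ends of the new remote-write plumbing together, a hedged sketch of how the knobs might be set in host_vars; the hostname and secret name are illustrative, and the field layout is taken from the commented example in the role defaults. The dict key ("central") becomes the entry's name via the combine() call in the template above:

# receiving side (e.g. mon.example.com)
prometheus_server_remote_write_receiver: yes

# sending side
prometheus_server_remote_write_destinations:
  central:
    url: "https://mon.example.com/prometheus/api/v1/write"
    basic_auth:
      username: remote
      password_file: /etc/prometheus/prometheus-remote.secret
prometheus_server_secret_files:
  remote: othersecret

With that in place the sender's rendered prometheus.yml gains a remote_write: list with a single entry named "central", and the receiver's unit file grows --web.enable-remote-write-receiver.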