Diffstat (limited to 'roles')
-rw-r--r--  roles/apps/nextcloud/base/defaults/main.yml | 9
-rw-r--r--  roles/apps/nextcloud/base/tasks/main.yml | 51
-rw-r--r--  roles/apps/nextcloud/base/templates/cron@.service.j2 (renamed from roles/apps/nextcloud/templates/cron@.service.j2) | 0
-rwxr-xr-x  roles/apps/nextcloud/base/templates/nextcloud-cron.j2 (renamed from roles/apps/nextcloud/templates/nextcloud-cron.j2) | 2
-rwxr-xr-x  roles/apps/nextcloud/base/templates/nextcloud-occ.j2 (renamed from roles/apps/nextcloud/templates/nextcloud-occ.j2) | 2
-rwxr-xr-x  roles/apps/nextcloud/base/templates/nextcloud-upgrade.j2 (renamed from roles/apps/nextcloud/templates/nextcloud-upgrade.j2) | 21
-rw-r--r--  roles/apps/nextcloud/defaults/main.yml | 9
-rw-r--r--  roles/apps/nextcloud/instance/tasks/main.yml | 7
-rw-r--r--  roles/apps/nextcloud/instance/templates/upgrade.sh.j2 | 77
-rw-r--r--  roles/apps/nextcloud/meta/main.yml | 3
-rw-r--r--  roles/apps/nextcloud/tasks/main.yml | 51
-rw-r--r--  roles/greenbone/server/templates/docker-compose-22.4.yml.j2 | 123
-rw-r--r--  roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2 | 2
-rw-r--r--  roles/monitoring/prometheus/server/defaults/main/main.yml | 22
-rw-r--r--  roles/monitoring/prometheus/server/templates/prometheus.service.j2 | 2
-rw-r--r--  roles/monitoring/prometheus/server/templates/prometheus.yml.j2 | 7
16 files changed, 275 insertions, 113 deletions
diff --git a/roles/apps/nextcloud/base/defaults/main.yml b/roles/apps/nextcloud/base/defaults/main.yml
new file mode 100644
index 00000000..1a8a6d52
--- /dev/null
+++ b/roles/apps/nextcloud/base/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+nextcloud_app_uid: "950"
+nextcloud_app_gid: "950"
+
+nextcloud_db_uid: "951"
+nextcloud_db_gid: "951"
+
+nextcloud_redis_uid: "952"
+nextcloud_redis_gid: "952"
diff --git a/roles/apps/nextcloud/base/tasks/main.yml b/roles/apps/nextcloud/base/tasks/main.yml
new file mode 100644
index 00000000..4c85a35c
--- /dev/null
+++ b/roles/apps/nextcloud/base/tasks/main.yml
@@ -0,0 +1,51 @@
+---
+- name: add group for nextcloud app
+ group:
+ name: nc-app
+ gid: "{{ nextcloud_app_gid }}"
+
+- name: add user for nextcloud app
+ user:
+ name: nc-app
+ uid: "{{ nextcloud_app_uid }}"
+ group: nc-app
+ password: "!"
+
+- name: add group for nextcloud db
+ group:
+ name: nc-db
+ gid: "{{ nextcloud_db_gid }}"
+
+- name: add user for nextcloud db
+ user:
+ name: nc-db
+ uid: "{{ nextcloud_db_uid }}"
+ group: nc-db
+ password: "!"
+
+- name: add group for nextcloud redis
+ group:
+ name: nc-redis
+ gid: "{{ nextcloud_redis_gid }}"
+
+- name: add user for nextcloud redis
+ user:
+ name: nc-redis
+ uid: "{{ nextcloud_redis_uid }}"
+ group: nc-redis
+ password: "!"
+
+- name: install template systemd unit for cron trigger
+ template:
+ src: cron@.service.j2
+ dest: /etc/systemd/system/nextcloud-cron@.service
+
+- name: install management scripts
+ loop:
+ - nextcloud-upgrade
+ - nextcloud-occ
+ - nextcloud-cron
+ template:
+ src: "{{ item }}.j2"
+ dest: "/usr/local/bin/{{ item }}"
+ mode: 0755
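
For orientation, the management scripts installed above are per-instance wrappers around crictl; a rough usage sketch follows (the instance name "example" is only the placeholder from the role defaults, and the argument order for nextcloud-upgrade is inferred from the template's $INST_NAME/$VERSION variables, so treat it as illustrative):

    # run an occ command inside the instance's nextcloud container
    nextcloud-occ example status -n --no-warnings
    # trigger the cron wrapper once via the templated systemd unit (presumed instance parameter)
    systemctl start nextcloud-cron@example.service
    # roll the instance to a (hypothetical) new version
    nextcloud-upgrade example 28.0.4
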
diff --git a/roles/apps/nextcloud/templates/cron@.service.j2 b/roles/apps/nextcloud/base/templates/cron@.service.j2
index d8cde0a3..d8cde0a3 100644
--- a/roles/apps/nextcloud/templates/cron@.service.j2
+++ b/roles/apps/nextcloud/base/templates/cron@.service.j2
diff --git a/roles/apps/nextcloud/templates/nextcloud-cron.j2 b/roles/apps/nextcloud/base/templates/nextcloud-cron.j2
index 355ae2c3..cf1d9715 100755
--- a/roles/apps/nextcloud/templates/nextcloud-cron.j2
+++ b/roles/apps/nextcloud/base/templates/nextcloud-cron.j2
@@ -16,4 +16,4 @@ if [ -z "$pod_id" ]; then echo "Pod not found"; exit 1; fi
container_id=$(crictl ps -q --name '^nextcloud$' -p "$pod_id")
if [ -z "$container_id" ]; then echo "Container not found"; exit 1; fi
-exec crictl exec "$container_id" php -f /var/www/html/cron.php
+exec crictl exec "$container_id" bash -c 'php -f /var/www/html/occ status -e; if [ $? -eq 0 ]; then php -f /var/www/html/cron.php; else echo "not running cron script when in maintenance mode"; fi'
diff --git a/roles/apps/nextcloud/templates/nextcloud-occ.j2 b/roles/apps/nextcloud/base/templates/nextcloud-occ.j2
index f12f1259..01383c95 100755
--- a/roles/apps/nextcloud/templates/nextcloud-occ.j2
+++ b/roles/apps/nextcloud/base/templates/nextcloud-occ.j2
@@ -16,4 +16,4 @@ if [ -z "$pod_id" ]; then echo "Pod not found"; exit 1; fi
container_id=$(crictl ps -q --name '^nextcloud$' -p "$pod_id")
if [ -z "$container_id" ]; then echo "Container not found"; exit 1; fi
-exec crictl exec -it "$container_id" php /var/www/html/occ $@
+exec crictl exec -it "$container_id" php -f /var/www/html/occ "$@"
diff --git a/roles/apps/nextcloud/templates/nextcloud-upgrade.j2 b/roles/apps/nextcloud/base/templates/nextcloud-upgrade.j2
index ffa912e8..f6edcb44 100755
--- a/roles/apps/nextcloud/templates/nextcloud-upgrade.j2
+++ b/roles/apps/nextcloud/base/templates/nextcloud-upgrade.j2
@@ -9,6 +9,13 @@ fi
set -eu
+CURRENT_VERSION=$(nextcloud-occ "$INST_NAME" status -n --no-warnings --output plain | tr -d '\r' | awk -F : '/versionstring/ { print($2) }' | tr -d ' ')
+if [ "$CURRENT_VERSION" = "$VERSION" ]; then
+ echo "The current running version of nextcloud is already $CURRENT_VERSION, nothing to do here."
+ exit 0
+fi
+echo "will upgrade nextcloud instance $INST_NAME from '$CURRENT_VERSION' to '$VERSION'"
+
K8S_CONFIG_HASH_D="/etc/kubernetes/config-hashes/"
K8S_CONFIG_HASH_FILE="$K8S_CONFIG_HASH_D/nextcloud-$INST_NAME.yml"
K8S_MANIFEST_D="/etc/kubernetes/manifests/"
@@ -41,16 +48,8 @@ else
echo ""
fi
-STORAGE_TYPE=$(findmnt -no fstype -T "$IMAGE_BUILD_D")
-if [ $STORAGE_TYPE == "zfs" ]; then
- echo "*** creating ZFS snapshot"
- echo ""
-
- IMAGE_NAME_ESCAPED=${IMAGE_NAME/\//\\/}
- CURRENT_VERSION=$(cat "$K8S_MANIFEST_FILE" | awk '/image: "'"$IMAGE_NAME_ESCAPED"':.*"/ { print($2) }' | tr -d '"' | cut -d ':' -f 2)
- ZFS_VOLUME=$(findmnt -no source -T "$IMAGE_BUILD_D")
- zfs snapshot "$ZFS_VOLUME@upgrade_$CURRENT_VERSION-to-$VERSION""_$(date '+%Y-%m-%m_%H:%M:%S')"
-fi
+INSTANCE_BASE_D=$(dirname "$IMAGE_BUILD_D")
+"$INSTANCE_BASE_D/upgrade.sh" prepare "$CURRENT_VERSION" "$VERSION"
echo "*** Rebuilding config-hash file"
echo ""
@@ -70,4 +69,6 @@ cat "$TMP_D/config-hash.yml" > "$K8S_CONFIG_HASH_FILE"
cat "$TMP_D/manifest.yml" > "$K8S_MANIFEST_FILE"
echo ""
+"$INSTANCE_BASE_D/upgrade.sh" finalize "$CURRENT_VERSION" "$VERSION"
+
exit 0
diff --git a/roles/apps/nextcloud/defaults/main.yml b/roles/apps/nextcloud/defaults/main.yml
index 158da180..631b0a0a 100644
--- a/roles/apps/nextcloud/defaults/main.yml
+++ b/roles/apps/nextcloud/defaults/main.yml
@@ -1,13 +1,4 @@
---
-nextcloud_app_uid: "950"
-nextcloud_app_gid: "950"
-
-nextcloud_db_uid: "951"
-nextcloud_db_gid: "951"
-
-nextcloud_redis_uid: "952"
-nextcloud_redis_gid: "952"
-
# nextcloud_instances:
# example:
# new: yes
diff --git a/roles/apps/nextcloud/instance/tasks/main.yml b/roles/apps/nextcloud/instance/tasks/main.yml
index 3c8928b2..71a3ee79 100644
--- a/roles/apps/nextcloud/instance/tasks/main.yml
+++ b/roles/apps/nextcloud/instance/tasks/main.yml
@@ -108,6 +108,13 @@
name: kubernetes/standalone/pod
+- name: install upgrade helper script
+ template:
+ src: upgrade.sh.j2
+ dest: "{{ nextcloud_instance_basepath }}/upgrade.sh"
+ mode: 0755
+
+
- name: install systemd timer unit
template:
src: cron-.timer.j2
diff --git a/roles/apps/nextcloud/instance/templates/upgrade.sh.j2 b/roles/apps/nextcloud/instance/templates/upgrade.sh.j2
new file mode 100644
index 00000000..62f6641e
--- /dev/null
+++ b/roles/apps/nextcloud/instance/templates/upgrade.sh.j2
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+set -e
+
+if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
+ echo "Usage: $0 (preapre|finalize) <old-version> <new-version>"
+ exit 1
+fi
+
+COMMAND="$1"
+OLD_VERSION="$2"
+NEW_VERSION="$3"
+POD_NAME="{{ nextcloud_instance }}-$(hostname)"
+
+maintenance_mode() {
+ POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q)
+ CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q)
+ crictl exec "$CONTAINER_ID" php -f /var/www/html/occ maintenance:mode "$1"
+}
+
+wait_for_cronjobs() {
+ POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q)
+ CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q)
+ crictl exec "$CONTAINER_ID" bash -c 'echo -n "waiting for running cron script "; while [ -n "$(pgrep -a php | grep cron.php)" ]; do echo -n "."; sleep 1; done; echo ""'
+}
+
+wait_for_upgrade_complete() {
+ NEW_VERSION="$1"
+
+ set +e
+ echo -n "waiting for new version to be ready "
+ while true; do
+ POD_ID=$(crictl pods --name "$POD_NAME" --state ready -q)
+ if [ -z "$POD_ID" ]; then continue; fi
+ CONTAINER_ID=$(crictl ps --pod "$POD_ID" --name nextcloud -q)
+ if [ -z "$CONTAINER_ID" ]; then continue; fi
+ STATUS_OUTPUT=$(crictl exec "$CONTAINER_ID" php -f /var/www/html/occ status -n --no-warnings --output plain)
+ if [ $? -eq 0 ]; then
+ RUNNING_VERSION=$(echo "$STATUS_OUTPUT" | awk -F : '/versionstring/ { print($2) }' | tr -d ' ')
+ if [ "$RUNNING_VERSION" = "$NEW_VERSION" ]; then
+ break
+ fi
+ echo -n "."
+ fi
+ sleep 1
+ done
+ echo ""
+ set -e
+ crictl exec "$CONTAINER_ID" bash -c 'echo -n "waiting for apache to start "; while [ -z "$(pgrep apache2)" ]; do echo -n "."; sleep 1; done; echo ""'
+}
+
+storage_snapshot() {
+ OLD_VERSION="$1"
+ NEW_VERSION="$2"
+
+{% if nextcloud_instances[nextcloud_instance].storage.type == 'zfs' %}
+ ZFS_VOLUME=$(findmnt -no source -T "{{ nextcloud_instance_basepath }}")
+ echo "creating snapshot for zfs volume: $ZFS_VOLUME"
+ zfs snapshot "$ZFS_VOLUME@upgrade_$OLD_VERSION-to-$NEW_VERSION""_$(date '+%Y-%m-%d_%H:%M:%S')"
+{% endif %}
+}
+
+case "$COMMAND" in
+ prepare)
+ maintenance_mode --on
+ wait_for_cronjobs
+ storage_snapshot "$OLD_VERSION" "$NEW_VERSION"
+ ;;
+ finalize)
+ wait_for_upgrade_complete "$NEW_VERSION"
+ maintenance_mode --off
+ ;;
+ *)
+ echo "unknown command: $COMMAND, must be prepare or finalize"
+ exit 1
+ ;;
+esac
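
Read together with the nextcloud-upgrade change above, the intended upgrade sequence is roughly the following; the instance path is whatever {{ nextcloud_instance_basepath }} renders to and the version numbers are purely illustrative:

    # prepare: maintenance:mode --on, wait for a running cron.php, optional zfs snapshot
    /srv/nextcloud/example/upgrade.sh prepare 28.0.3 28.0.4
    # ... nextcloud-upgrade then rewrites the config-hash file and the static pod manifest,
    # ... and the kubelet restarts the pod with the new image ...
    # finalize: wait until the new version answers occ status and apache is up, then maintenance:mode --off
    /srv/nextcloud/example/upgrade.sh finalize 28.0.3 28.0.4
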
diff --git a/roles/apps/nextcloud/meta/main.yml b/roles/apps/nextcloud/meta/main.yml
new file mode 100644
index 00000000..c00c47ce
--- /dev/null
+++ b/roles/apps/nextcloud/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: apps/nextcloud/base
diff --git a/roles/apps/nextcloud/tasks/main.yml b/roles/apps/nextcloud/tasks/main.yml
index 69bbba6a..6e81f351 100644
--- a/roles/apps/nextcloud/tasks/main.yml
+++ b/roles/apps/nextcloud/tasks/main.yml
@@ -1,55 +1,4 @@
---
-- name: add group for nextcloud app
- group:
- name: nc-app
- gid: "{{ nextcloud_app_gid }}"
-
-- name: add user for nextcloud app
- user:
- name: nc-app
- uid: "{{ nextcloud_app_uid }}"
- group: nc-app
- password: "!"
-
-- name: add group for nextcloud db
- group:
- name: nc-db
- gid: "{{ nextcloud_db_gid }}"
-
-- name: add user for nextcloud db
- user:
- name: nc-db
- uid: "{{ nextcloud_db_uid }}"
- group: nc-db
- password: "!"
-
-- name: add group for nextcloud redis
- group:
- name: nc-redis
- gid: "{{ nextcloud_redis_gid }}"
-
-- name: add user for nextcloud redis
- user:
- name: nc-redis
- uid: "{{ nextcloud_redis_uid }}"
- group: nc-redis
- password: "!"
-
-- name: install template systemd unit for cron trigger
- template:
- src: cron@.service.j2
- dest: /etc/systemd/system/nextcloud-cron@.service
-
-- name: install management scripts
- loop:
- - nextcloud-upgrade
- - nextcloud-occ
- - nextcloud-cron
- template:
- src: "{{ item }}.j2"
- dest: "/usr/local/bin/{{ item }}"
- mode: 0755
-
- name: instance specific tasks
loop: "{{ nextcloud_instances | list }}"
loop_control:
diff --git a/roles/greenbone/server/templates/docker-compose-22.4.yml.j2 b/roles/greenbone/server/templates/docker-compose-22.4.yml.j2
index 85742836..8c007ee9 100644
--- a/roles/greenbone/server/templates/docker-compose-22.4.yml.j2
+++ b/roles/greenbone/server/templates/docker-compose-22.4.yml.j2
@@ -1,65 +1,65 @@
services:
vulnerability-tests:
- image: greenbone/vulnerability-tests
+ image: registry.community.greenbone.net/community/vulnerability-tests
environment:
STORAGE_PATH: /var/lib/openvas/22.04/vt-data/nasl
volumes:
- vt_data_vol:/mnt
notus-data:
- image: greenbone/notus-data
+ image: registry.community.greenbone.net/community/notus-data
volumes:
- notus_data_vol:/mnt
scap-data:
- image: greenbone/scap-data
+ image: registry.community.greenbone.net/community/scap-data
volumes:
- scap_data_vol:/mnt
cert-bund-data:
- image: greenbone/cert-bund-data
+ image: registry.community.greenbone.net/community/cert-bund-data
volumes:
- cert_data_vol:/mnt
dfn-cert-data:
- image: greenbone/dfn-cert-data
+ image: registry.community.greenbone.net/community/dfn-cert-data
volumes:
- cert_data_vol:/mnt
depends_on:
- cert-bund-data
data-objects:
- image: greenbone/data-objects
+ image: registry.community.greenbone.net/community/data-objects
volumes:
- data_objects_vol:/mnt
report-formats:
- image: greenbone/report-formats
+ image: registry.community.greenbone.net/community/report-formats
volumes:
- data_objects_vol:/mnt
depends_on:
- data-objects
gpg-data:
- image: greenbone/gpg-data
+ image: registry.community.greenbone.net/community/gpg-data
volumes:
- gpg_data_vol:/mnt
redis-server:
- image: greenbone/redis-server
+ image: registry.community.greenbone.net/community/redis-server
restart: on-failure
volumes:
- redis_socket_vol:/run/redis/
pg-gvm:
- image: greenbone/pg-gvm:stable
+ image: registry.community.greenbone.net/community/pg-gvm:stable
restart: on-failure
volumes:
- psql_data_vol:/var/lib/postgresql
- psql_socket_vol:/var/run/postgresql
gvmd:
- image: greenbone/gvmd:stable
+ image: registry.community.greenbone.net/community/gvmd:stable
restart: on-failure
volumes:
- gvmd_data_vol:/var/lib/gvm
@@ -86,7 +86,7 @@ services:
condition: service_completed_successfully
gsa:
- image: greenbone/gsa:stable
+ image: registry.community.greenbone.net/community/gsa:stable
restart: on-failure
ports:
- 127.0.0.1:9392:80
@@ -94,9 +94,73 @@ services:
- gvmd_socket_vol:/run/gvmd
depends_on:
- gvmd
+ # Sets the log level of openvas to the LOG_LEVEL set in the environment
+ # and changes log output to /var/log/openvas instead of /var/log/gvm
+ # to reduce the likelihood of unwanted log interference
+ configure-openvas:
+ image: registry.community.greenbone.net/community/openvas-scanner:stable
+ volumes:
+ - openvas_data_vol:/mnt
+ - openvas_log_data_vol:/var/log/openvas
+ command:
+ - /bin/sh
+ - -c
+ - |
+ printf "table_driven_lsc = yes\nopenvasd_server = http://openvasd:80\n" > /mnt/openvas.conf
+ sed "s/level=.*/level=INFO/" /etc/openvas/openvas_log.conf | sed 's/gvm/openvas/' > /mnt/openvas_log.conf
+ chmod 644 /mnt/openvas.conf
+ chmod 644 /mnt/openvas_log.conf
+ touch /var/log/openvas/openvas.log
+ chmod 666 /var/log/openvas/openvas.log
+
+ # shows logs of openvas
+ openvas:
+ image: registry.community.greenbone.net/community/openvas-scanner:stable
+ restart: on-failure
+ volumes:
+ - openvas_data_vol:/etc/openvas
+ - openvas_log_data_vol:/var/log/openvas
+ command:
+ - /bin/sh
+ - -c
+ - |
+ cat /etc/openvas/openvas.conf
+ tail -f /var/log/openvas/openvas.log
+ depends_on:
+ configure-openvas:
+ condition: service_completed_successfully
+
+ openvasd:
+ image: registry.community.greenbone.net/community/openvas-scanner:stable
+ restart: on-failure
+ environment:
+ # `service_notus` is set to disable everything but notus,
+ # if you want to utilize openvasd directly, remove `OPENVASD_MODE`
+ OPENVASD_MODE: service_notus
+ GNUPGHOME: /etc/openvas/gnupg
+ LISTENING: 0.0.0.0:80
+ volumes:
+ - openvas_data_vol:/etc/openvas
+ - openvas_log_data_vol:/var/log/openvas
+ - gpg_data_vol:/etc/openvas/gnupg
+ - notus_data_vol:/var/lib/notus
+ # enable port forwarding when you want to use the http api from your host machine
+ # ports:
+ # - 127.0.0.1:3000:80
+ depends_on:
+ vulnerability-tests:
+ condition: service_completed_successfully
+ configure-openvas:
+ condition: service_completed_successfully
+ gpg-data:
+ condition: service_completed_successfully
+ networks:
+ default:
+ aliases:
+ - openvasd
ospd-openvas:
- image: greenbone/ospd-openvas:stable
+ image: registry.community.greenbone.net/community/ospd-openvas:stable
restart: on-failure
hostname: ospd-openvas.local
cap_add:
@@ -111,8 +175,6 @@ services:
"-f",
"--config",
"/etc/gvm/ospd-openvas.conf",
- "--mqtt-broker-address",
- "mqtt-broker",
"--notus-feed-dir",
"/var/lib/notus/advisories",
"-m",
@@ -124,6 +186,8 @@ services:
- notus_data_vol:/var/lib/notus
- ospd_openvas_socket_vol:/run/ospd
- redis_socket_vol:/run/redis/
+ - openvas_data_vol:/etc/openvas/
+ - openvas_log_data_vol:/var/log/openvas
depends_on:
redis-server:
condition: service_started
@@ -131,32 +195,11 @@ services:
condition: service_completed_successfully
vulnerability-tests:
condition: service_completed_successfully
-
- mqtt-broker:
- restart: on-failure
- image: greenbone/mqtt-broker
- networks:
- default:
- aliases:
- - mqtt-broker
- - broker
-
- notus-scanner:
- restart: on-failure
- image: greenbone/notus-scanner:stable
- volumes:
- - notus_data_vol:/var/lib/notus
- - gpg_data_vol:/etc/openvas/gnupg
- environment:
- NOTUS_SCANNER_MQTT_BROKER_ADDRESS: mqtt-broker
- NOTUS_SCANNER_PRODUCTS_DIRECTORY: /var/lib/notus/products
- depends_on:
- - mqtt-broker
- - gpg-data
- - vulnerability-tests
+ configure-openvas:
+ condition: service_completed_successfully
gvm-tools:
- image: greenbone/gvm-tools
+ image: registry.community.greenbone.net/community/gvm-tools
volumes:
- gvmd_socket_vol:/run/gvmd
- ospd_openvas_socket_vol:/run/ospd
@@ -177,3 +220,5 @@ volumes:
gvmd_socket_vol:
ospd_openvas_socket_vol:
redis_socket_vol:
+ openvas_data_vol:
+ openvas_log_data_vol:
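
Since all images now come from registry.community.greenbone.net and the mqtt-broker/notus-scanner services are gone, an existing deployment will typically be refreshed with something like the following when applied by hand (the compose file path is an assumption, not something managed in this hunk):

    docker compose -f /path/to/docker-compose-22.4.yml pull
    # --remove-orphans cleans up the now-removed mqtt-broker and notus-scanner containers
    docker compose -f /path/to/docker-compose-22.4.yml up -d --remove-orphans
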
diff --git a/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2 b/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2
index 1de70b70..efee3941 100644
--- a/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2
+++ b/roles/installer/ubuntu/autoinstall/templates/autoinstall.yml.j2
@@ -272,7 +272,7 @@ autoinstall:
- curtin in-target --target=/target -- apt-get -y -q purge systemd-oomd
{% endif %}
{% if (install_codename | ubuntu_release_compare('>=', 'noble')) %}
- - curtin in-target --target=/target -- apt-get -y -q purge ubuntu-kernel-accessories ubuntu-pro-client lxd-installer
+ - curtin in-target --target=/target -- apt-get -y -q purge ubuntu-kernel-accessories ubuntu-pro-client lxd-installer unminimize
{% endif %}
{% if ubuntu_autoinstall_desktop is undefined %}
- curtin in-target --target=/target -- apt-mark manual iputils-ping isc-dhcp-client netcat-openbsd netplan.io sudo
diff --git a/roles/monitoring/prometheus/server/defaults/main/main.yml b/roles/monitoring/prometheus/server/defaults/main/main.yml
index 25cffa5b..dd290e9e 100644
--- a/roles/monitoring/prometheus/server/defaults/main/main.yml
+++ b/roles/monitoring/prometheus/server/defaults/main/main.yml
@@ -75,5 +75,27 @@ prometheus_server_web_listen_address: 127.0.0.1:9090
# - node
# - blackbox
+prometheus_server_remote_write_receiver: no
+
+# prometheus_server_remote_write_destinations:
+# example:
+# url: "https://mon.example.com/prometheus/api/v1/write"
+# basic_auth:
+# username: remote
+# password_file: /etc/prometheus/prometheus-remote.secret
+# tls_config:
+# ca: |
+# -----BEGIN CERTIFICATE-----
+# ...
+# -----END CERTIFICATE-----
+# write_relabel_configs:
+# - source_labels: ['__name__']
+# regex: 'go_gc_.*'
+# action: 'drop'
+# - source_labels: ['job']
+# regex: 'alertmanager'
+# action: 'drop'
+
# prometheus_server_secret_files:
# user: secret
+# remote: othersecret
diff --git a/roles/monitoring/prometheus/server/templates/prometheus.service.j2 b/roles/monitoring/prometheus/server/templates/prometheus.service.j2
index e65e9425..86c30cbd 100644
--- a/roles/monitoring/prometheus/server/templates/prometheus.service.j2
+++ b/roles/monitoring/prometheus/server/templates/prometheus.service.j2
@@ -6,7 +6,7 @@ After=time-sync.target
[Service]
Restart=on-failure
User=prometheus
-ExecStart=/usr/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/var/lib/prometheus/metrics2/ --storage.tsdb.retention.time={{ prometheus_server_retention }}{% if prometheus_server_web_external_url is defined %} --web.external-url={{ prometheus_server_web_external_url }}{% endif %}{% if prometheus_server_web_route_prefix is defined %} --web.route-prefix={{ prometheus_server_web_route_prefix }}{% endif %}{% if prometheus_server_auth_users is defined %} --web.config.file=/etc/prometheus/prometheus-web.yml{% endif %} --web.listen-address={{ prometheus_server_web_listen_address }}
+ExecStart=/usr/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/var/lib/prometheus/metrics2/ --storage.tsdb.retention.time={{ prometheus_server_retention }}{% if prometheus_server_web_external_url is defined %} --web.external-url={{ prometheus_server_web_external_url }}{% endif %}{% if prometheus_server_web_route_prefix is defined %} --web.route-prefix={{ prometheus_server_web_route_prefix }}{% endif %}{% if prometheus_server_auth_users is defined %} --web.config.file=/etc/prometheus/prometheus-web.yml{% endif %}{% if prometheus_server_remote_write_receiver %} --web.enable-remote-write-receiver{% endif %} --web.listen-address={{ prometheus_server_web_listen_address }}
ExecReload=/bin/kill -HUP $MAINPID
TimeoutStopSec=20s
SendSIGKILL=no
diff --git a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
index 85adfa52..d72a4815 100644
--- a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
+++ b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
@@ -96,3 +96,10 @@ scrape_configs:
- targets: ['{{ config.url }}']
{% endfor %}
{% endfor %}
+{% if prometheus_server_remote_write_destinations is defined %}
+
+remote_write:
+{% for name, config in prometheus_server_remote_write_destinations.items() %}
+ - {{ config | combine({'name': name }) | to_nice_yaml(indent=2) | indent(4) }}
+{% endfor %}
+{% endif %}
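
For illustration, with the example destination from the new server defaults, the block this template renders would look roughly like the following (to_nice_yaml orders keys alphabetically and quotes values as needed, so the exact layout may differ):

    remote_write:
      - name: example
        url: https://mon.example.com/prometheus/api/v1/write
        basic_auth:
          username: remote
          password_file: /etc/prometheus/prometheus-remote.secret
        write_relabel_configs:
          - source_labels: ['__name__']
            regex: 'go_gc_.*'
            action: 'drop'

On the receiving side, setting prometheus_server_remote_write_receiver: yes adds --web.enable-remote-write-receiver to the ExecStart line patched above.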