From 3ef83057161e6d973f79805340d4c3d210425465 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 30 May 2021 16:08:03 +0200 Subject: cleanup: old preliminary tasks --- roles/monitoring/prometheus/server/tasks/main.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/roles/monitoring/prometheus/server/tasks/main.yml b/roles/monitoring/prometheus/server/tasks/main.yml index 784e872a..ec5bd9a9 100644 --- a/roles/monitoring/prometheus/server/tasks/main.yml +++ b/roles/monitoring/prometheus/server/tasks/main.yml @@ -8,12 +8,9 @@ - name: install apt packages apt: - name: prometheus + name: prom-server state: present -- name: listen on localhost only - lineinfile: - path: /etc/default/prometheus - regexp: '^ARGS=' - line: 'ARGS="--web.listen-address=127.0.0.1:9090 --storage.tsdb.retention={{ prometheus_server_retention }}"' - notify: restart prometheus +## TODO: +## - systemd service unit +## - create CA and certificate/key -- cgit v1.2.3 From e29ce4fdbe2ce669c62777fffa18ae8557e54a73 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 30 May 2021 22:28:46 +0200 Subject: prometheus: initial simple server role --- chaos-at-home/ch-mon.yml | 12 +++++ inventory/group_vars/chaos-at-home/network.yml | 3 ++ inventory/host_vars/ch-mon.yml | 62 ++++++++++++++++++++++ inventory/hosts.ini | 2 + roles/monitoring/prometheus/server/tasks/main.yml | 48 ++++++++++++++++- .../server/templates/prometheus.service.j2 | 38 +++++++++++++ .../prometheus/server/templates/prometheus.yml.j2 | 11 ++++ 7 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 chaos-at-home/ch-mon.yml create mode 100644 inventory/host_vars/ch-mon.yml create mode 100644 roles/monitoring/prometheus/server/templates/prometheus.service.j2 create mode 100644 roles/monitoring/prometheus/server/templates/prometheus.yml.j2 (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/chaos-at-home/ch-mon.yml b/chaos-at-home/ch-mon.yml new file mode 100644 index 00000000..fb0eff53 --- /dev/null +++ b/chaos-at-home/ch-mon.yml @@ -0,0 +1,12 @@ +--- +- name: Basic Setup + hosts: ch-mon + roles: + - role: apt-repo/base + - role: core/base + - role: core/sshd/base + - role: core/zsh + - role: core/ntp + - role: storage/lvm/groups + - role: apt-repo/spreadspace + - role: monitoring/prometheus/server diff --git a/inventory/group_vars/chaos-at-home/network.yml b/inventory/group_vars/chaos-at-home/network.yml index db345b75..fa34a7a0 100644 --- a/inventory/group_vars/chaos-at-home/network.yml +++ b/inventory/group_vars/chaos-at-home/network.yml @@ -41,6 +41,7 @@ network_zones: key: "{{ vault_wifi_keys.iot }}" offsets: ch-wled-test: 1 + ch-mon: 230 ch-iot: 254 svc: @@ -63,6 +64,7 @@ network_zones: ch-nic: 53 __svc_http__: 80 __svc_imap__: 143 + ch-mon: 230 ch-router-obsd: 253 ch-router: 254 ############# @@ -83,6 +85,7 @@ network_zones: ch-sw1: 201 ch-ap0: 220 ch-ap1: 221 + ch-mon: 230 ch-gnocchi: 240 ch-router: 241 diff --git a/inventory/host_vars/ch-mon.yml b/inventory/host_vars/ch-mon.yml new file mode 100644 index 00000000..6bfa58d4 --- /dev/null +++ b/inventory/host_vars/ch-mon.yml @@ -0,0 +1,62 @@ +--- +install_jumphost: ch-jump + +install: + vm: + memory: 8G + numcpus: 8 + autostart: yes + disks: + primary: /dev/sda + scsi: + sda: + type: zfs + name: root + size: 10g + sdb: + type: zfs + name: data + size: 50g + interfaces: + - bridge: br-svc + name: svc0 + - bridge: br-iot + name: iot0 + - bridge: br-mgmt + name: mgmt0 + +network: + 
nameservers: "{{ network_zones.svc.dns }}" + domain: "{{ host_domain }}" + systemd_link: + interfaces: "{{ install.interfaces }}" + primary: &_network_primary_ + name: svc0 + address: "{{ network_zones.svc.prefix | ipaddr(network_zones.svc.offsets[inventory_hostname]) | ipaddr('address/prefix') }}" + gateway: "{{ network_zones.svc.gateway }}" + static_routes: + - destination: "{{ network_zones.lan.prefix }}" + gateway: "{{ network_zones.svc.prefix | ipaddr(network_zones.svc.offsets['ch-gw-lan']) | ipaddr('address') }}" + interfaces: + - *_network_primary_ + - name: iot0 + address: "{{ network_zones.iot.prefix | ipaddr(network_zones.iot.offsets[inventory_hostname]) | ipaddr('address/prefix') }}" + - name: mgmt0 + address: "{{ network_zones.mgmt.prefix | ipaddr(network_zones.mgmt.offsets[inventory_hostname]) | ipaddr('address/prefix') }}" + + +lvm_groups: + mondata: + pvs: + - /dev/sdb + + +spreadspace_apt_repo_components: + - prometheus + +prometheus_server_storage: + type: lvm + vg: mondata + lv: prometheus + size: 30G + fs: ext4 diff --git a/inventory/hosts.ini b/inventory/hosts.ini index 871ee575..954e9374 100644 --- a/inventory/hosts.ini +++ b/inventory/hosts.ini @@ -30,6 +30,7 @@ ch-prometheus-legacy host_name=prometheus ch-testvm-prometheus host_name=testvm-prometheus ch-iot host_name=iot ch-vpn host_name=vpn +ch-mon host_name=mon ch-epimetheus host_name=epimetheus ch-mc host_name=mc ch-atlas host_name=atlas @@ -324,6 +325,7 @@ ch-prometheus-legacy ch-testvm-prometheus ch-iot ch-vpn +ch-mon ch-k8s-master [vmhost-ch-prometheus] ch-prometheus diff --git a/roles/monitoring/prometheus/server/tasks/main.yml b/roles/monitoring/prometheus/server/tasks/main.yml index ec5bd9a9..ffbc5ffe 100644 --- a/roles/monitoring/prometheus/server/tasks/main.yml +++ b/roles/monitoring/prometheus/server/tasks/main.yml @@ -1,4 +1,11 @@ --- +- name: check if prometheus apt component of spreadspace repo is enabled + assert: + msg: "please enable the 'prometheus' component of spreadspace repo using 'spreadspace_apt_repo_components'" + that: + - spreadspace_apt_repo_components is defined + - "'prometheus' in spreadspace_apt_repo_components" + - name: prepare storage volume for /var/lib/prometheus when: prometheus_server_storage is defined vars: @@ -11,6 +18,45 @@ name: prom-server state: present +- name: create configuration directories + loop: + - rules + - targets + file: + path: "/etc/prometheus/{{ item }}" + state: directory + +- name: generate configuration file + template: + src: prometheus.yml.j2 + dest: /etc/prometheus/prometheus.yml + +- name: add user for server + user: + name: prometheus + system: yes + home: /var/lib/prometheus + create_home: no + +- name: create data directory + file: + path: /var/lib/prometheus/metrics2 + state: directory + owner: prometheus + group: prometheus + ## TODO: -## - systemd service unit ## - create CA and certificate/key + +- name: generate systemd service unit + template: + src: prometheus.service.j2 + dest: /etc/systemd/system/prometheus.service + notify: restart prometheus + +- name: make sure prometheus is enabled and started + systemd: + name: prometheus.service + daemon_reload: yes + state: started + enabled: yes diff --git a/roles/monitoring/prometheus/server/templates/prometheus.service.j2 b/roles/monitoring/prometheus/server/templates/prometheus.service.j2 new file mode 100644 index 00000000..0530e589 --- /dev/null +++ b/roles/monitoring/prometheus/server/templates/prometheus.service.j2 @@ -0,0 +1,38 @@ +[Unit] +Description=Monitoring system and time series 
database +Documentation=https://prometheus.io/docs/introduction/overview/ man:prometheus(1) +After=time-sync.target + +[Service] +Restart=on-failure +User=prometheus +ExecStart=/usr/bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/var/lib/prometheus/metrics2/ --storage.tsdb.retention.time={{ prometheus_server_retention }} +ExecReload=/bin/kill -HUP $MAINPID +TimeoutStopSec=20s +SendSIGKILL=no + +# systemd hardening-options +AmbientCapabilities= +CapabilityBoundingSet= +DeviceAllow=/dev/null rw +DevicePolicy=strict +LimitMEMLOCK=0 +LimitNOFILE=8192 +LockPersonality=true +MemoryDenyWriteExecute=true +NoNewPrivileges=true +PrivateDevices=true +PrivateTmp=true +PrivateUsers=true +ProtectControlGroups=true +ProtectHome=true +ProtectKernelModules=true +ProtectKernelTunables=true +ProtectSystem=full +RemoveIPC=true +RestrictNamespaces=true +RestrictRealtime=true +SystemCallArchitectures=native + +[Install] +WantedBy=multi-user.target diff --git a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 new file mode 100644 index 00000000..cadc3ef0 --- /dev/null +++ b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 @@ -0,0 +1,11 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + +rule_files: + - /etc/prometheus/rules/*.yml + +scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] -- cgit v1.2.3 From 94f56133bb0035fe85ee9e58d573eb4485e9fa42 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Wed, 2 Jun 2021 00:46:12 +0200 Subject: prometheus: initial serverside config --- roles/monitoring/prometheus/exporter/node/tasks/main.yml | 7 ------- roles/monitoring/prometheus/server/defaults/main.yml | 3 +++ roles/monitoring/prometheus/server/tasks/main.yml | 16 ++++++++++++---- .../prometheus/server/templates/prometheus.yml.j2 | 13 ++++++++++++- 4 files changed, 27 insertions(+), 12 deletions(-) (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/roles/monitoring/prometheus/exporter/node/tasks/main.yml b/roles/monitoring/prometheus/exporter/node/tasks/main.yml index 694dafb0..c8756acf 100644 --- a/roles/monitoring/prometheus/exporter/node/tasks/main.yml +++ b/roles/monitoring/prometheus/exporter/node/tasks/main.yml @@ -1,11 +1,4 @@ --- -- name: check if prometheus apt component of spreadspace repo is enabled - assert: - msg: "please enable the 'prometheus' component of spreadspace repo using 'spreadspace_apt_repo_components'" - that: - - spreadspace_apt_repo_components is defined - - "'prometheus' in spreadspace_apt_repo_components" - - name: install apt packages apt: name: prom-exporter-node diff --git a/roles/monitoring/prometheus/server/defaults/main.yml b/roles/monitoring/prometheus/server/defaults/main.yml index b5d13b5d..ab08a2ff 100644 --- a/roles/monitoring/prometheus/server/defaults/main.yml +++ b/roles/monitoring/prometheus/server/defaults/main.yml @@ -4,3 +4,6 @@ # ... 
prometheus_server_retention: "15d" + +prometheus_server_jobs: + - node diff --git a/roles/monitoring/prometheus/server/tasks/main.yml b/roles/monitoring/prometheus/server/tasks/main.yml index ffbc5ffe..5c649f34 100644 --- a/roles/monitoring/prometheus/server/tasks/main.yml +++ b/roles/monitoring/prometheus/server/tasks/main.yml @@ -20,16 +20,18 @@ - name: create configuration directories loop: + - jobs - rules - targets file: path: "/etc/prometheus/{{ item }}" state: directory -- name: generate configuration file - template: - src: prometheus.yml.j2 - dest: /etc/prometheus/prometheus.yml +- name: create sub-directroy for all exporter types in jobs directory + loop: "{{ prometheus_server_jobs }}" + file: + path: "/etc/prometheus/jobs/{{ item }}" + state: directory - name: add user for server user: @@ -48,6 +50,12 @@ ## TODO: ## - create CA and certificate/key +- name: generate configuration file + template: + src: prometheus.yml.j2 + dest: /etc/prometheus/prometheus.yml + notify: restart prometheus + - name: generate systemd service unit template: src: prometheus.service.j2 diff --git a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 index cadc3ef0..007afa90 100644 --- a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 +++ b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 @@ -8,4 +8,15 @@ rule_files: scrape_configs: - job_name: 'prometheus' static_configs: - - targets: ['localhost:9090'] + - targets: ['localhost:9090'] +{% for job in prometheus_server_jobs %} + + - job_name: '{{ job }}' + metrics_path: /proxy + params: + module: + - {{ job }} + file_sd_configs: + - files: + - "/etc/prometheus/jobs/{{ job }}/*.yml" +{% endfor %} -- cgit v1.2.3 From 43ec757a4cf7bc27f2156c490db67e7c38764d1b Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Wed, 2 Jun 2021 01:50:20 +0200 Subject: prometheus: server CA and certificates --- roles/monitoring/prometheus/server/tasks/main.yml | 4 +- roles/monitoring/prometheus/server/tasks/tls.yml | 98 +++++++++++++++++++++++ 2 files changed, 100 insertions(+), 2 deletions(-) create mode 100644 roles/monitoring/prometheus/server/tasks/tls.yml (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/roles/monitoring/prometheus/server/tasks/main.yml b/roles/monitoring/prometheus/server/tasks/main.yml index 5c649f34..61660a03 100644 --- a/roles/monitoring/prometheus/server/tasks/main.yml +++ b/roles/monitoring/prometheus/server/tasks/main.yml @@ -47,8 +47,8 @@ owner: prometheus group: prometheus -## TODO: -## - create CA and certificate/key +- name: create TLS CA and certificates + import_tasks: tls.yml - name: generate configuration file template: diff --git a/roles/monitoring/prometheus/server/tasks/tls.yml b/roles/monitoring/prometheus/server/tasks/tls.yml new file mode 100644 index 00000000..f9ad5ca3 --- /dev/null +++ b/roles/monitoring/prometheus/server/tasks/tls.yml @@ -0,0 +1,98 @@ +--- +- name: install python-cryptoraphy + apt: + name: "{{ python_basename }}-cryptography" + state: present + +- name: create base directory + file: + path: /etc/ssl/prometheus + state: directory + +- name: create CA directory + file: + path: /etc/ssl/prometheus/ca + state: directory + owner: root + group: root + mode: 0700 + +- name: create server cert/key directory + file: + path: /etc/ssl/prometheus/server + state: directory + owner: root + group: prometheus + mode: 0750 + +- name: create CA private key + openssl_privatekey: + path: 
/etc/ssl/prometheus/ca/key.pem + type: RSA + size: 4096 + owner: root + group: root + mode: 0600 + +- name: create signing request for CA certificate + openssl_csr: + path: /etc/ssl/prometheus/ca/csr.pem + privatekey_path: /etc/ssl/prometheus/ca/key.pem + CN: "prometheus CA" + useCommonNameForSAN: no + key_usage: + - cRLSign + - digitalSignature + - keyCertSign + key_usage_critical: yes + basic_constraints: + - 'CA:TRUE' + - 'pathlen:0' + basic_constraints_critical: yes + +- name: create self-signed CA certificate + openssl_certificate: + path: /etc/ssl/prometheus/ca-crt.pem + csr_path: /etc/ssl/prometheus/ca/csr.pem + privatekey_path: /etc/ssl/prometheus/ca/key.pem + provider: selfsigned + selfsigned_digest: sha256 + selfsigned_not_after: "+18250d" ## 50 years + + +- name: create server private key to connect to exporter + openssl_privatekey: + path: /etc/ssl/prometheus/server/exporter-key.pem + type: RSA + size: 4096 + owner: prometheus + group: prometheus + mode: 0400 + +- name: create signing request for server certificate to connect to exporter + openssl_csr: + path: /etc/ssl/prometheus/server/exporter-csr.pem + privatekey_path: /etc/ssl/prometheus/server/exporter-key.pem + CN: "{{ inventory_hostname }}" + subject_alt_name: + - "DNS:{{ host_name }}.{{ host_domain }}" + - "IP:{{ ansible_default_ipv4.address }}" + key_usage: + - digitalSignature + key_usage_critical: yes + extended_key_usage: + - clientAuth + extended_key_usage_critical: yes + basic_constraints: + - 'CA:FALSE' + basic_constraints_critical: yes + +- name: create server certificate to connect to exporter + openssl_certificate: + path: /etc/ssl/prometheus/server/exporter-crt.pem + csr_path: /etc/ssl/prometheus/server/exporter-csr.pem + provider: ownca + ownca_path: /etc/ssl/prometheus/ca-crt.pem + ownca_privatekey_path: /etc/ssl/prometheus/ca/key.pem + ownca_digest: sha256 + ownca_not_after: "+18250d" ## 50 years -- cgit v1.2.3 From f0e65f6846bce9b30f8a97bfab68a06795b730ed Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Thu, 3 Jun 2021 15:14:23 +0200 Subject: prometheus: add blackbox exporter --- chaos-at-home/ch-mon.yml | 1 + .../prometheus/exporter/blackbox/defaults/main.yml | 22 ++++++++++ .../prometheus/exporter/blackbox/handlers/main.yml | 16 ++++++++ .../prometheus/exporter/blackbox/tasks/main.yml | 39 ++++++++++++++++++ .../exporter/blackbox/templates/config.yml.j2 | 4 ++ .../exporter/blackbox/templates/service.j2 | 32 +++++++++++++++ .../prometheus/exporter/node/templates/service.j2 | 2 +- .../monitoring/prometheus/server/handlers/main.yml | 5 +++ roles/monitoring/prometheus/server/tasks/main.yml | 2 +- .../prometheus/server/templates/prometheus.yml.j2 | 48 ++++++++++++++++++++++ 10 files changed, 169 insertions(+), 2 deletions(-) create mode 100644 roles/monitoring/prometheus/exporter/blackbox/defaults/main.yml create mode 100644 roles/monitoring/prometheus/exporter/blackbox/handlers/main.yml create mode 100644 roles/monitoring/prometheus/exporter/blackbox/tasks/main.yml create mode 100644 roles/monitoring/prometheus/exporter/blackbox/templates/config.yml.j2 create mode 100644 roles/monitoring/prometheus/exporter/blackbox/templates/service.j2 (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/chaos-at-home/ch-mon.yml b/chaos-at-home/ch-mon.yml index 2cb69484..a1179204 100644 --- a/chaos-at-home/ch-mon.yml +++ b/chaos-at-home/ch-mon.yml @@ -12,3 +12,4 @@ - role: monitoring/prometheus/server - role: monitoring/prometheus/exporter/base - role: monitoring/prometheus/exporter/node + - role: 
monitoring/prometheus/exporter/blackbox diff --git a/roles/monitoring/prometheus/exporter/blackbox/defaults/main.yml b/roles/monitoring/prometheus/exporter/blackbox/defaults/main.yml new file mode 100644 index 00000000..fcf66555 --- /dev/null +++ b/roles/monitoring/prometheus/exporter/blackbox/defaults/main.yml @@ -0,0 +1,22 @@ +--- +prometheus_exporter_blackbox_modules: + icmp: + prober: icmp + tcp_connect: + prober: tcp + tcp_tls: + prober: tcp + tcp: + tls: true + tls_config: + insecure_skip_verify: true + http_2xx: + prober: http + ssh_banner: + prober: tcp + tcp: + query_response: + - expect: "^SSH-2.0-" + - send: "SSH-2.0-blackbox-ssh-check" + +prometheus_exporter_blackbox_modules_extra: {} diff --git a/roles/monitoring/prometheus/exporter/blackbox/handlers/main.yml b/roles/monitoring/prometheus/exporter/blackbox/handlers/main.yml new file mode 100644 index 00000000..99a416e2 --- /dev/null +++ b/roles/monitoring/prometheus/exporter/blackbox/handlers/main.yml @@ -0,0 +1,16 @@ +--- +- name: restart prometheus-blackbox-exporter + service: + name: prometheus-blackbox-exporter + state: restarted + +- name: reload prometheus-blackbox-exporter + service: + name: prometheus-blackbox-exporter + state: reloaded + +- name: reload prometheus-exporter-exporter + service: + name: prometheus-exporter-exporter + ## TODO: implement reload once exporter_exporter supports this... + state: restarted diff --git a/roles/monitoring/prometheus/exporter/blackbox/tasks/main.yml b/roles/monitoring/prometheus/exporter/blackbox/tasks/main.yml new file mode 100644 index 00000000..3b8e997d --- /dev/null +++ b/roles/monitoring/prometheus/exporter/blackbox/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: install apt packages + apt: + name: prom-exporter-blackbox + state: present + +- name: create config directory + file: + path: /etc/prometheus/exporter/blackbox + state: directory + +- name: generate configuration + template: + src: config.yml.j2 + dest: /etc/prometheus/exporter/blackbox/config.yml + notify: reload prometheus-blackbox-exporter + +- name: generate systemd service unit + template: + src: service.j2 + dest: /etc/systemd/system/prometheus-blackbox-exporter.service + notify: restart prometheus-blackbox-exporter + +- name: make sure prometheus-exporter-exporter is enabled and started + systemd: + name: prometheus-blackbox-exporter.service + daemon_reload: yes + state: started + enabled: yes + +- name: register exporter + copy: + content: | + method: http + http: + port: 9115 + path: '/probe' + dest: /etc/prometheus/exporter/enabled/blackbox.yml + notify: reload prometheus-exporter-exporter diff --git a/roles/monitoring/prometheus/exporter/blackbox/templates/config.yml.j2 b/roles/monitoring/prometheus/exporter/blackbox/templates/config.yml.j2 new file mode 100644 index 00000000..01e3f7a0 --- /dev/null +++ b/roles/monitoring/prometheus/exporter/blackbox/templates/config.yml.j2 @@ -0,0 +1,4 @@ +# {{ ansible_managed }} + +modules: + {{ prometheus_exporter_blackbox_modules | combine(prometheus_exporter_blackbox_modules_extra) | to_nice_yaml(indent=2) | indent(2)}} diff --git a/roles/monitoring/prometheus/exporter/blackbox/templates/service.j2 b/roles/monitoring/prometheus/exporter/blackbox/templates/service.j2 new file mode 100644 index 00000000..c9c5712c --- /dev/null +++ b/roles/monitoring/prometheus/exporter/blackbox/templates/service.j2 @@ -0,0 +1,32 @@ +[Unit] +Description=Prometheus blackbox exporter + +[Service] +Restart=always +User=prometheus-exporter +ExecStart=/usr/bin/prometheus-blackbox-exporter 
--web.listen-address="127.0.0.1:9115" --config.file=/etc/prometheus/exporter/blackbox/config.yml +ExecReload=/bin/kill -HUP $MAINPID + +# systemd hardening-options +AmbientCapabilities=CAP_NET_RAW +CapabilityBoundingSet=CAP_NET_RAW +DeviceAllow=/dev/null rw +DevicePolicy=strict +LockPersonality=true +MemoryDenyWriteExecute=true +NoNewPrivileges=true +PrivateDevices=true +PrivateTmp=true +PrivateUsers=true +ProtectControlGroups=true +ProtectHome=true +ProtectKernelModules=true +ProtectKernelTunables=true +ProtectSystem=strict +RemoveIPC=true +RestrictNamespaces=true +RestrictRealtime=true +SystemCallArchitectures=native + +[Install] +WantedBy=multi-user.target diff --git a/roles/monitoring/prometheus/exporter/node/templates/service.j2 b/roles/monitoring/prometheus/exporter/node/templates/service.j2 index 801850ed..7aa2834a 100644 --- a/roles/monitoring/prometheus/exporter/node/templates/service.j2 +++ b/roles/monitoring/prometheus/exporter/node/templates/service.j2 @@ -1,5 +1,5 @@ [Unit] -Description=Prometheus exporter +Description=Prometheus node exporter [Service] Restart=always diff --git a/roles/monitoring/prometheus/server/handlers/main.yml b/roles/monitoring/prometheus/server/handlers/main.yml index edeba752..bf8735e9 100644 --- a/roles/monitoring/prometheus/server/handlers/main.yml +++ b/roles/monitoring/prometheus/server/handlers/main.yml @@ -3,3 +3,8 @@ service: name: prometheus state: restarted + +- name: reload prometheus + service: + name: prometheus + state: reloaded diff --git a/roles/monitoring/prometheus/server/tasks/main.yml b/roles/monitoring/prometheus/server/tasks/main.yml index 61660a03..6b030fb4 100644 --- a/roles/monitoring/prometheus/server/tasks/main.yml +++ b/roles/monitoring/prometheus/server/tasks/main.yml @@ -54,7 +54,7 @@ template: src: prometheus.yml.j2 dest: /etc/prometheus/prometheus.yml - notify: restart prometheus + notify: reload prometheus - name: generate systemd service unit template: diff --git a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 index e94ea043..eb77d6d1 100644 --- a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 +++ b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 @@ -1,3 +1,5 @@ +# {{ ansible_managed }} + global: scrape_interval: 15s evaluation_interval: 15s @@ -27,3 +29,49 @@ scrape_configs: - files: - "/etc/prometheus/jobs/{{ job }}/*.yml" {% endfor %} + + ## TODO: temporary test + - job_name: 'ping' + metrics_path: /proxy + params: + module: + - blackbox + - icmp + scheme: https + tls_config: + ca_file: /etc/ssl/prometheus/ca-crt.pem + cert_file: /etc/ssl/prometheus/server/exporter-crt.pem + key_file: /etc/ssl/prometheus/server/exporter-key.pem + static_configs: + - targets: + - 62.99.185.129 + - 9.9.9.9 + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: 192.168.32.230:9999 + + - job_name: 'tcp_tls' + metrics_path: /proxy + params: + module: + - blackbox + - tcp_tls + scheme: https + tls_config: + ca_file: /etc/ssl/prometheus/ca-crt.pem + cert_file: /etc/ssl/prometheus/server/exporter-crt.pem + key_file: /etc/ssl/prometheus/server/exporter-key.pem + static_configs: + - targets: + - web.chaos-at-home.org:443 + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + 
replacement: 192.168.32.230:9999 -- cgit v1.2.3 From 8ab24a10ac669ade61761d37e68207b402bc277c Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Sun, 6 Jun 2021 14:57:25 +0200 Subject: prometheus: move CA to seperate role and add prometheus zone groups --- chaos-at-home/ch-mon.yml | 3 +- .../group_vars/promzone-chaos-at-home/vars.yml | 3 ++ inventory/hosts.ini | 10 ++++ roles/monitoring/prometheus/ca/tasks/main.yml | 52 ++++++++++++++++++++ roles/monitoring/prometheus/server/tasks/tls.yml | 55 ++++------------------ 5 files changed, 76 insertions(+), 47 deletions(-) create mode 100644 inventory/group_vars/promzone-chaos-at-home/vars.yml create mode 100644 roles/monitoring/prometheus/ca/tasks/main.yml (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/chaos-at-home/ch-mon.yml b/chaos-at-home/ch-mon.yml index a1179204..bce4adab 100644 --- a/chaos-at-home/ch-mon.yml +++ b/chaos-at-home/ch-mon.yml @@ -9,7 +9,8 @@ - role: core/ntp - role: storage/lvm/groups - role: apt-repo/spreadspace - - role: monitoring/prometheus/server + - role: monitoring/prometheus/ca - role: monitoring/prometheus/exporter/base - role: monitoring/prometheus/exporter/node - role: monitoring/prometheus/exporter/blackbox + - role: monitoring/prometheus/server diff --git a/inventory/group_vars/promzone-chaos-at-home/vars.yml b/inventory/group_vars/promzone-chaos-at-home/vars.yml new file mode 100644 index 00000000..413a6502 --- /dev/null +++ b/inventory/group_vars/promzone-chaos-at-home/vars.yml @@ -0,0 +1,3 @@ +--- +promethues_server: ch-mon +promethues_zone_name: chaos@home diff --git a/inventory/hosts.ini b/inventory/hosts.ini index 954e9374..1c1051aa 100644 --- a/inventory/hosts.ini +++ b/inventory/hosts.ini @@ -379,6 +379,16 @@ vmhost-sk-2019vm-guests vmhost-sk-tomnext-guests +## prometheus monitoring +[promzone-chaos-at-home-server] +ch-mon +[promzone-chaos-at-home] +ch-mon +ch-testvm-prometheus +[promzone-chaos-at-home:children] +promzone-chaos-at-home-server + + ## hoster [hroot] sk-2019 diff --git a/roles/monitoring/prometheus/ca/tasks/main.yml b/roles/monitoring/prometheus/ca/tasks/main.yml new file mode 100644 index 00000000..9f166321 --- /dev/null +++ b/roles/monitoring/prometheus/ca/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: install python-cryptoraphy + apt: + name: "{{ python_basename }}-cryptography" + state: present + +- name: create base directory + file: + path: /etc/ssl/prometheus + state: directory + +- name: create CA directory + file: + path: /etc/ssl/prometheus/ca + state: directory + owner: root + group: root + mode: 0700 + +- name: create CA private key + openssl_privatekey: + path: /etc/ssl/prometheus/ca/key.pem + type: RSA + size: 4096 + owner: root + group: root + mode: 0600 + +- name: create signing request for CA certificate + openssl_csr: + path: /etc/ssl/prometheus/ca/csr.pem + privatekey_path: /etc/ssl/prometheus/ca/key.pem + CN: "CA for promethues zone {{ promethues_zone_name }}" + useCommonNameForSAN: no + key_usage: + - cRLSign + - digitalSignature + - keyCertSign + key_usage_critical: yes + basic_constraints: + - 'CA:TRUE' + - 'pathlen:0' + basic_constraints_critical: yes + +- name: create self-signed CA certificate + openssl_certificate: + path: /etc/ssl/prometheus/ca-crt.pem + csr_path: /etc/ssl/prometheus/ca/csr.pem + privatekey_path: /etc/ssl/prometheus/ca/key.pem + provider: selfsigned + selfsigned_digest: sha256 + selfsigned_not_after: "+18250d" ## 50 years diff --git a/roles/monitoring/prometheus/server/tasks/tls.yml 
b/roles/monitoring/prometheus/server/tasks/tls.yml index f9ad5ca3..5c112e12 100644 --- a/roles/monitoring/prometheus/server/tasks/tls.yml +++ b/roles/monitoring/prometheus/server/tasks/tls.yml @@ -9,14 +9,6 @@ path: /etc/ssl/prometheus state: directory -- name: create CA directory - file: - path: /etc/ssl/prometheus/ca - state: directory - owner: root - group: root - mode: 0700 - - name: create server cert/key directory file: path: /etc/ssl/prometheus/server @@ -25,42 +17,7 @@ group: prometheus mode: 0750 -- name: create CA private key - openssl_privatekey: - path: /etc/ssl/prometheus/ca/key.pem - type: RSA - size: 4096 - owner: root - group: root - mode: 0600 - -- name: create signing request for CA certificate - openssl_csr: - path: /etc/ssl/prometheus/ca/csr.pem - privatekey_path: /etc/ssl/prometheus/ca/key.pem - CN: "prometheus CA" - useCommonNameForSAN: no - key_usage: - - cRLSign - - digitalSignature - - keyCertSign - key_usage_critical: yes - basic_constraints: - - 'CA:TRUE' - - 'pathlen:0' - basic_constraints_critical: yes - -- name: create self-signed CA certificate - openssl_certificate: - path: /etc/ssl/prometheus/ca-crt.pem - csr_path: /etc/ssl/prometheus/ca/csr.pem - privatekey_path: /etc/ssl/prometheus/ca/key.pem - provider: selfsigned - selfsigned_digest: sha256 - selfsigned_not_after: "+18250d" ## 50 years - - -- name: create server private key to connect to exporter +- name: create private key to connect to exporter openssl_privatekey: path: /etc/ssl/prometheus/server/exporter-key.pem type: RSA @@ -68,8 +25,9 @@ owner: prometheus group: prometheus mode: 0400 + notify: reload prometheus -- name: create signing request for server certificate to connect to exporter +- name: create signing request for client certificate to connect to exporter openssl_csr: path: /etc/ssl/prometheus/server/exporter-csr.pem privatekey_path: /etc/ssl/prometheus/server/exporter-key.pem @@ -87,7 +45,9 @@ - 'CA:FALSE' basic_constraints_critical: yes -- name: create server certificate to connect to exporter +## TODO: implement remote signing? 
+ +- name: create client certificate to connect to exporter openssl_certificate: path: /etc/ssl/prometheus/server/exporter-crt.pem csr_path: /etc/ssl/prometheus/server/exporter-csr.pem @@ -96,3 +56,6 @@ ownca_privatekey_path: /etc/ssl/prometheus/ca/key.pem ownca_digest: sha256 ownca_not_after: "+18250d" ## 50 years + notify: reload prometheus + +## TODO: install /etc/ssl/prometheus/ca-crt.pem from server -- cgit v1.2.3 From 6082a92fa86d121d3ea4256859ee4c9d412e78c0 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Thu, 10 Jun 2021 01:15:32 +0200 Subject: promethues: remote certificate signing for exporter/base --- chaos-at-home/ch-testvm-prometheus.yml | 7 +++- inventory/host_vars/ch-testvm-prometheus.yml | 3 ++ roles/monitoring/prometheus/ca/tasks/main.yml | 2 +- .../prometheus/exporter/base/tasks/tls.yml | 49 +++++++++++++++++++--- roles/monitoring/prometheus/server/tasks/tls.yml | 34 ++++++++++----- .../prometheus/server/templates/prometheus.yml.j2 | 16 +++---- 6 files changed, 85 insertions(+), 26 deletions(-) (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/chaos-at-home/ch-testvm-prometheus.yml b/chaos-at-home/ch-testvm-prometheus.yml index a34d58e3..9caa2f9a 100644 --- a/chaos-at-home/ch-testvm-prometheus.yml +++ b/chaos-at-home/ch-testvm-prometheus.yml @@ -7,5 +7,8 @@ - role: core/sshd/base - role: core/zsh - role: core/ntp - - role: kubernetes/base - - role: kubernetes/standalone/base + - role: apt-repo/spreadspace + - role: monitoring/prometheus/exporter/base + - role: monitoring/prometheus/exporter/node + # - role: kubernetes/base + # - role: kubernetes/standalone/base diff --git a/inventory/host_vars/ch-testvm-prometheus.yml b/inventory/host_vars/ch-testvm-prometheus.yml index d11d565c..e539735f 100644 --- a/inventory/host_vars/ch-testvm-prometheus.yml +++ b/inventory/host_vars/ch-testvm-prometheus.yml @@ -33,6 +33,9 @@ network: - *_network_primary_ +spreadspace_apt_repo_components: + - prometheus + containerd_storage: type: lvm diff --git a/roles/monitoring/prometheus/ca/tasks/main.yml b/roles/monitoring/prometheus/ca/tasks/main.yml index 9f166321..cde4a267 100644 --- a/roles/monitoring/prometheus/ca/tasks/main.yml +++ b/roles/monitoring/prometheus/ca/tasks/main.yml @@ -34,7 +34,6 @@ useCommonNameForSAN: no key_usage: - cRLSign - - digitalSignature - keyCertSign key_usage_critical: yes basic_constraints: @@ -50,3 +49,4 @@ provider: selfsigned selfsigned_digest: sha256 selfsigned_not_after: "+18250d" ## 50 years + selfsigned_create_subject_key_identifier: always_create diff --git a/roles/monitoring/prometheus/exporter/base/tasks/tls.yml b/roles/monitoring/prometheus/exporter/base/tasks/tls.yml index b2731b09..72186acb 100644 --- a/roles/monitoring/prometheus/exporter/base/tasks/tls.yml +++ b/roles/monitoring/prometheus/exporter/base/tasks/tls.yml @@ -45,17 +45,56 @@ - 'CA:FALSE' basic_constraints_critical: yes -## TODO: implement remote singing using server +- name: slurp CSR + slurp: + src: /etc/ssl/prometheus/exporter/csr.pem + register: prometheus_exporter_server_csr -- name: create exporter certificate - openssl_certificate: +- name: check if exporter certificate exists + stat: path: /etc/ssl/prometheus/exporter/crt.pem - csr_path: /etc/ssl/prometheus/exporter/csr.pem + register: prometheus_exporter_server_cert + +- name: read exporter client certificate issuer key id and validity + when: prometheus_exporter_server_cert.stat.exists + openssl_certificate_info: + path: /etc/ssl/prometheus/exporter/crt.pem + valid_at: + ten_years: '+3650d' + register: 
prometheus_exporter_server_cert_info + +- name: slurp existing exporter certificate + when: prometheus_exporter_server_cert.stat.exists + slurp: + src: /etc/ssl/prometheus/exporter/crt.pem + register: prometheus_exporter_server_cert_current + +- name: generate exporter certificate + delegate_to: "{{ promethues_server }}" + community.crypto.x509_certificate_pipe: + content: "{{ prometheus_exporter_server_cert_current.content | default('') | b64decode }}" + csr_content: "{{ prometheus_exporter_server_csr.content | b64decode }}" provider: ownca ownca_path: /etc/ssl/prometheus/ca-crt.pem ownca_privatekey_path: /etc/ssl/prometheus/ca/key.pem ownca_digest: sha256 ownca_not_after: "+18250d" ## 50 years + force: "{{ prometheus_exporter_server_cert.stat.exists and (not prometheus_exporter_server_cert_info.valid_at.ten_years) }}" + register: prometheus_exporter_server_cert + +- name: store exporter certificate + copy: + content: "{{ prometheus_exporter_server_cert.certificate }}" + dest: /etc/ssl/prometheus/exporter/crt.pem notify: restart prometheus-exporter-exporter -## TODO: install /etc/ssl/prometheus/ca-crt.pem from server +- name: slurp CA certificate + delegate_to: "{{ promethues_server }}" + slurp: + src: /etc/ssl/prometheus/ca-crt.pem + register: prometheus_exporter_ca_certificate + +- name: install CA certificate + copy: + content: "{{ prometheus_exporter_ca_certificate.content | b64decode }}" + dest: /etc/ssl/prometheus/ca-crt.pem diff --git a/roles/monitoring/prometheus/server/tasks/tls.yml b/roles/monitoring/prometheus/server/tasks/tls.yml index 5c112e12..940c69b1 100644 --- a/roles/monitoring/prometheus/server/tasks/tls.yml +++ b/roles/monitoring/prometheus/server/tasks/tls.yml @@ -17,9 +17,9 @@ group: prometheus mode: 0750 -- name: create private key to connect to exporter +- name: create private key for scrape-client certificate openssl_privatekey: - path: /etc/ssl/prometheus/server/exporter-key.pem + path: /etc/ssl/prometheus/server/scrape-key.pem type: RSA size: 4096 owner: prometheus @@ -27,10 +27,10 @@ mode: 0400 notify: reload prometheus -- name: create signing request for client certificate to connect to exporter +- name: create signing request for scrape-client certificate openssl_csr: - path: /etc/ssl/prometheus/server/exporter-csr.pem - privatekey_path: /etc/ssl/prometheus/server/exporter-key.pem + path: /etc/ssl/prometheus/server/scrape-csr.pem + privatekey_path: /etc/ssl/prometheus/server/scrape-key.pem CN: "{{ inventory_hostname }}" subject_alt_name: - "DNS:{{ host_name }}.{{ host_domain }}" @@ -45,17 +45,31 @@ - 'CA:FALSE' basic_constraints_critical: yes +## TODO: install /etc/ssl/prometheus/ca-crt.pem from CA host + +- name: check if scrape-client certificate exists + stat: + path: /etc/ssl/prometheus/server/scrape-crt.pem + register: prometheus_server_scrape_client_cert + +- name: check scrape-client certificate validity + when: prometheus_server_scrape_client_cert.stat.exists + openssl_certificate_info: + path: /etc/ssl/prometheus/server/scrape-crt.pem + valid_at: + ten_years: '+3650d' + register: prometheus_server_scrape_client_cert_info + ## TODO: implement remote signing? 
-- name: create client certificate to connect to exporter +- name: create scrape-client certificate openssl_certificate: - path: /etc/ssl/prometheus/server/exporter-crt.pem - csr_path: /etc/ssl/prometheus/server/exporter-csr.pem + path: /etc/ssl/prometheus/server/scrape-crt.pem + csr_path: /etc/ssl/prometheus/server/scrape-csr.pem provider: ownca ownca_path: /etc/ssl/prometheus/ca-crt.pem ownca_privatekey_path: /etc/ssl/prometheus/ca/key.pem ownca_digest: sha256 ownca_not_after: "+18250d" ## 50 years + force: "{{ prometheus_server_scrape_client_cert.stat.exists and (not prometheus_server_scrape_client_cert_info.valid_at.ten_years) }}" notify: reload prometheus - -## TODO: install /etc/ssl/prometheus/ca-crt.pem from server diff --git a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 index 5eb7c570..3975c74d 100644 --- a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 +++ b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 @@ -23,8 +23,8 @@ scrape_configs: scheme: https tls_config: ca_file: /etc/ssl/prometheus/ca-crt.pem - cert_file: /etc/ssl/prometheus/server/exporter-crt.pem - key_file: /etc/ssl/prometheus/server/exporter-key.pem + cert_file: /etc/ssl/prometheus/server/scrape-crt.pem + key_file: /etc/ssl/prometheus/server/scrape-key.pem file_sd_configs: - files: - "/etc/prometheus/jobs/{{ job }}/*.yml" @@ -40,8 +40,8 @@ scrape_configs: scheme: https tls_config: ca_file: /etc/ssl/prometheus/ca-crt.pem - cert_file: /etc/ssl/prometheus/server/exporter-crt.pem - key_file: /etc/ssl/prometheus/server/exporter-key.pem + cert_file: /etc/ssl/prometheus/server/scrape-crt.pem + key_file: /etc/ssl/prometheus/server/scrape-key.pem static_configs: - targets: - 62.99.185.129 @@ -63,8 +63,8 @@ scrape_configs: scheme: https tls_config: ca_file: /etc/ssl/prometheus/ca-crt.pem - cert_file: /etc/ssl/prometheus/server/exporter-crt.pem - key_file: /etc/ssl/prometheus/server/exporter-key.pem + cert_file: /etc/ssl/prometheus/server/scrape-crt.pem + key_file: /etc/ssl/prometheus/server/scrape-key.pem static_configs: - targets: - web.chaos-at-home.org @@ -85,8 +85,8 @@ scrape_configs: scheme: https tls_config: ca_file: /etc/ssl/prometheus/ca-crt.pem - cert_file: /etc/ssl/prometheus/server/exporter-crt.pem - key_file: /etc/ssl/prometheus/server/exporter-key.pem + cert_file: /etc/ssl/prometheus/server/scrape-crt.pem + key_file: /etc/ssl/prometheus/server/scrape-key.pem static_configs: - targets: - 192.168.32.230:222 -- cgit v1.2.3 From 11baa089a6aaf62a5c35f8009aebf889a4bf85fa Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Thu, 10 Jun 2021 01:29:39 +0200 Subject: prometheus: generate target configs --- inventory/group_vars/promzone-chaos-at-home/vars.yml | 2 ++ roles/monitoring/prometheus/server/tasks/main.yml | 11 +++++++++++ 2 files changed, 13 insertions(+) (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/inventory/group_vars/promzone-chaos-at-home/vars.yml b/inventory/group_vars/promzone-chaos-at-home/vars.yml index 413a6502..8a0d0aa8 100644 --- a/inventory/group_vars/promzone-chaos-at-home/vars.yml +++ b/inventory/group_vars/promzone-chaos-at-home/vars.yml @@ -1,3 +1,5 @@ --- promethues_server: ch-mon promethues_zone_name: chaos@home + +prometheus_zone_targets: "{{ groups['promzone-chaos-at-home'] }}" diff --git a/roles/monitoring/prometheus/server/tasks/main.yml b/roles/monitoring/prometheus/server/tasks/main.yml index 6b030fb4..492e8dc2 100644 --- 
a/roles/monitoring/prometheus/server/tasks/main.yml +++ b/roles/monitoring/prometheus/server/tasks/main.yml @@ -50,6 +50,17 @@ - name: create TLS CA and certificates import_tasks: tls.yml +- name: generate targets config + loop: "{{ prometheus_zone_targets }}" + copy: + content: | + - targets: [ "{{ hostvars[item].ansible_default_ipv4.address }}:9999" ] + labels: + instance: "{{ item }}" + dest: "/etc/prometheus/targets/{{ item }}.yml" + +# TODO: enable targets for configured jobs using symlinks in /etc/prometheus/jobs/*/ + - name: generate configuration file template: src: prometheus.yml.j2 -- cgit v1.2.3 From 1e9d610bb87ce6f0cb1e5a8d44f09616f90273e2 Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Fri, 18 Jun 2021 01:24:40 +0200 Subject: prometheus enable/disable targets for jobs --- .../group_vars/promzone-chaos-at-home/vars.yml | 12 ++++++--- roles/monitoring/prometheus/ca/tasks/main.yml | 2 +- .../prometheus/exporter/base/tasks/tls.yml | 4 +-- .../prometheus/server/filter_plugins/prometheus.py | 29 ++++++++++++++++++++++ roles/monitoring/prometheus/server/tasks/main.yml | 11 ++++++-- 5 files changed, 49 insertions(+), 9 deletions(-) create mode 100644 roles/monitoring/prometheus/server/filter_plugins/prometheus.py (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/inventory/group_vars/promzone-chaos-at-home/vars.yml b/inventory/group_vars/promzone-chaos-at-home/vars.yml index 2345292b..078576f1 100644 --- a/inventory/group_vars/promzone-chaos-at-home/vars.yml +++ b/inventory/group_vars/promzone-chaos-at-home/vars.yml @@ -1,9 +1,13 @@ --- -promethues_server: ch-mon -promethues_zone_name: chaos@home - -prometheus_zone_targets: "{{ groups['promzone-chaos-at-home'] }}" +prometheus_scrape_endpoint: "{{ network.primary.address | ipaddr('address') }}:9999" prometheus_exporters_extra: [] prometheus_exporters_default: - node + +prometheus_server: ch-mon +prometheus_server_jobs: + - node + +prometheus_zone_name: chaos@home +prometheus_zone_targets: "{{ groups['promzone-chaos-at-home'] }}" diff --git a/roles/monitoring/prometheus/ca/tasks/main.yml b/roles/monitoring/prometheus/ca/tasks/main.yml index cde4a267..064cb6e8 100644 --- a/roles/monitoring/prometheus/ca/tasks/main.yml +++ b/roles/monitoring/prometheus/ca/tasks/main.yml @@ -30,7 +30,7 @@ openssl_csr: path: /etc/ssl/prometheus/ca/csr.pem privatekey_path: /etc/ssl/prometheus/ca/key.pem - CN: "CA for promethues zone {{ promethues_zone_name }}" + CN: "CA for prometheus zone {{ prometheus_zone_name }}" useCommonNameForSAN: no key_usage: - cRLSign diff --git a/roles/monitoring/prometheus/exporter/base/tasks/tls.yml b/roles/monitoring/prometheus/exporter/base/tasks/tls.yml index 72186acb..2f880e6a 100644 --- a/roles/monitoring/prometheus/exporter/base/tasks/tls.yml +++ b/roles/monitoring/prometheus/exporter/base/tasks/tls.yml @@ -70,7 +70,7 @@ register: prometheus_exporter_server_cert_current - name: generate exporter certificate - delegate_to: "{{ promethues_server }}" + delegate_to: "{{ prometheus_server }}" community.crypto.x509_certificate_pipe: content: "{{ prometheus_exporter_server_cert_current.content | default('') | b64decode }}" csr_content: "{{ prometheus_exporter_server_csr.content | b64decode }}" @@ -89,7 +89,7 @@ notify: restart prometheus-exporter-exporter - name: slurp CA certificate - delegate_to: "{{ promethues_server }}" + delegate_to: "{{ prometheus_server }}" slurp: src: /etc/ssl/prometheus/ca-crt.pem register: prometheus_exporter_ca_certificate diff --git 
a/roles/monitoring/prometheus/server/filter_plugins/prometheus.py b/roles/monitoring/prometheus/server/filter_plugins/prometheus.py new file mode 100644 index 00000000..81cfae70 --- /dev/null +++ b/roles/monitoring/prometheus/server/filter_plugins/prometheus.py @@ -0,0 +1,29 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from functools import partial + +from ansible import errors + + +def prometheus_job_targets(hostvars, jobs, targets): + try: + result = [] + for job in jobs: + for target in targets: + enabled = job in hostvars[target]['prometheus_exporters_default'] or job in hostvars[target]['prometheus_exporters_extra'] + result.append({'job': job, 'target': target, 'enabled': enabled}) + return result + except Exception as e: + raise errors.AnsibleFilterError("prometheus_job_targets(): %s" % str(e)) + + +class FilterModule(object): + + ''' prometheus filters ''' + filter_map = { + 'prometheus_job_targets': prometheus_job_targets, + } + + def filters(self): + return self.filter_map diff --git a/roles/monitoring/prometheus/server/tasks/main.yml b/roles/monitoring/prometheus/server/tasks/main.yml index 492e8dc2..44f0800e 100644 --- a/roles/monitoring/prometheus/server/tasks/main.yml +++ b/roles/monitoring/prometheus/server/tasks/main.yml @@ -54,12 +54,19 @@ loop: "{{ prometheus_zone_targets }}" copy: content: | - - targets: [ "{{ hostvars[item].ansible_default_ipv4.address }}:9999" ] + - targets: [ "{{ hostvars[item].prometheus_scrape_endpoint }}" ] labels: instance: "{{ item }}" dest: "/etc/prometheus/targets/{{ item }}.yml" -# TODO: enable targets for configured jobs using symlinks in /etc/prometheus/jobs/*/ +- name: enable targets for jobs + loop: "{{ hostvars | prometheus_job_targets(prometheus_server_jobs, prometheus_zone_targets) }}" + loop_control: + label: "{{ item.job }} -> {{ item.target }}" + file: + src: "{{ item.enabled | ternary('/etc/prometheus/targets/' + item.target + '.yml', omit) }}" + path: "/etc/prometheus/jobs/{{ item.job }}/{{ item.target }}.yml" + state: "{{ item.enabled | ternary('link', 'absent') }}" - name: generate configuration file template: -- cgit v1.2.3 From 1a40395d35db76e1482bc32fb7a97e6a60c4b1dc Mon Sep 17 00:00:00 2001 From: Christian Pointner Date: Mon, 21 Jun 2021 00:23:51 +0200 Subject: promethues: initial support for alert rules --- .../monitoring/prometheus/server/defaults/main.yml | 9 - .../prometheus/server/defaults/main/main.yml | 13 ++ .../prometheus/server/defaults/main/rules_node.yml | 219 +++++++++++++++++++++ .../server/defaults/main/rules_prometheus.yml | 192 ++++++++++++++++++ roles/monitoring/prometheus/server/tasks/main.yml | 39 ++-- .../prometheus/server/templates/rules.yml.j2 | 5 + 6 files changed, 453 insertions(+), 24 deletions(-) delete mode 100644 roles/monitoring/prometheus/server/defaults/main.yml create mode 100644 roles/monitoring/prometheus/server/defaults/main/main.yml create mode 100644 roles/monitoring/prometheus/server/defaults/main/rules_node.yml create mode 100644 roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml create mode 100644 roles/monitoring/prometheus/server/templates/rules.yml.j2 (limited to 'roles/monitoring/prometheus/server/tasks') diff --git a/roles/monitoring/prometheus/server/defaults/main.yml b/roles/monitoring/prometheus/server/defaults/main.yml deleted file mode 100644 index ab08a2ff..00000000 --- a/roles/monitoring/prometheus/server/defaults/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# prometheus_server_storage: -# type: (zfs|lvm) 
-# ... - -prometheus_server_retention: "15d" - -prometheus_server_jobs: - - node diff --git a/roles/monitoring/prometheus/server/defaults/main/main.yml b/roles/monitoring/prometheus/server/defaults/main/main.yml new file mode 100644 index 00000000..b10d6f17 --- /dev/null +++ b/roles/monitoring/prometheus/server/defaults/main/main.yml @@ -0,0 +1,13 @@ +--- +# prometheus_server_storage: +# type: (zfs|lvm) +# ... + +prometheus_server_retention: "15d" + +prometheus_server_jobs: + - node + +prometheus_server_rules: + prometheus: "{{ prometheus_server_rules_prometheus + prometheus_server_rules_prometheus_extra }}" + node: "{{ prometheus_server_rules_node + prometheus_server_rules_prometheus_extra }}" diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_node.yml b/roles/monitoring/prometheus/server/defaults/main/rules_node.yml new file mode 100644 index 00000000..ab7317ac --- /dev/null +++ b/roles/monitoring/prometheus/server/defaults/main/rules_node.yml @@ -0,0 +1,219 @@ +--- +## https://awesome-prometheus-alerts.grep.to/rules#host-and-hardware +prometheus_server_rules_node_extra: [] +prometheus_server_rules_node: + - alert: HostOutOfMemory + expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10 + for: 2m + labels: + severity: warning + annotations: + summary: Host out of memory (instance {{ '{{' }} $labels.instance {{ '}}' }}) + description: "Node memory is filling up (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + + - alert: HostMemoryUnderMemoryPressure + expr: rate(node_vmstat_pgmajfault[1m]) > 1000 + for: 2m + labels: + severity: warning + annotations: + summary: Host memory under memory pressure (instance {{ '{{' }} $labels.instance {{ '}}' }}) + description: "The node is under heavy memory pressure. 
High rate of major page faults\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + + - alert: HostOutOfDiskSpace + expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0 + for: 2m + labels: + severity: warning + annotations: + summary: Host out of disk space (instance {{ '{{' }} $labels.instance {{ '}}' }}) + description: "Disk is almost full (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + + - alert: HostDiskWillFillIn24Hours + expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0 + for: 2m + labels: + severity: warning + annotations: + summary: Host disk will fill in 24 hours (instance {{ '{{' }} $labels.instance {{ '}}' }}) + description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + + - alert: HostOutOfInodes + expr: node_filesystem_files_free{mountpoint ="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0 + for: 2m + labels: + severity: warning + annotations: + summary: Host out of inodes (instance {{ '{{' }} $labels.instance {{ '}}' }}) + description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + + - alert: HostInodesWillFillIn24Hours + expr: node_filesystem_files_free{mountpoint ="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{mountpoint="/rootfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0 + for: 2m + labels: + severity: warning + annotations: + summary: Host inodes will fill in 24 hours (instance {{ '{{' }} $labels.instance {{ '}}' }}) + description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + + - alert: HostUnusualDiskReadLatency + expr: rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0 + for: 2m + labels: + severity: warning + annotations: + summary: Host unusual disk read latency (instance {{ '{{' }} $labels.instance {{ '}}' }}) + description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + + - alert: HostUnusualDiskWriteLatency + expr: rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0 + for: 2m + labels: + severity: warning + annotations: + summary: Host unusual disk write latency (instance {{ '{{' }} $labels.instance {{ '}}' }}) + description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" + + - alert: HostHighCpuLoad + expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80 + for: 0m + labels: + severity: warning + annotations: + 
summary: Host high CPU load (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "CPU load is > 80%\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostCpuStealNoisyNeighbor
+    expr: avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host CPU steal noisy neighbor (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "CPU steal is > 10%. A noisy neighbor is killing VM performance, or a spot instance may be out of credit.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostSystemdServiceCrashed
+    expr: node_systemd_unit_state{state="failed"} == 1
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host systemd service crashed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "systemd service crashed\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostPhysicalComponentTooHot
+    expr: node_hwmon_temp_celsius > 75
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host physical component too hot (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Physical hardware component too hot\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostNodeOvertemperatureAlarm
+    expr: node_hwmon_temp_crit_alarm_celsius == 1
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Host node overtemperature alarm (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Physical node temperature alarm triggered\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostRaidArrayGotInactive
+    expr: node_md_state{state="inactive"} > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Host RAID array got inactive (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "RAID array {{ '{{' }} $labels.device {{ '}}' }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostRaidDiskFailure
+    expr: node_md_disks{state="failed"} > 0
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host RAID disk failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "At least one device in the RAID array on {{ '{{' }} $labels.instance {{ '}}' }} has failed. Array {{ '{{' }} $labels.md_device {{ '}}' }} needs attention and possibly a disk swap\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostOomKillDetected
+    expr: increase(node_vmstat_oom_kill[1m]) > 0
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host OOM kill detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "OOM kill detected\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostEdacCorrectableErrorsDetected
+    expr: increase(node_edac_correctable_errors_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: info
+    annotations:
+      summary: Host EDAC Correctable Errors detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} has had {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} correctable memory errors reported by EDAC in the last minute.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostEdacUncorrectableErrorsDetected
+    expr: node_edac_uncorrectable_errors_total > 0
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host EDAC Uncorrectable Errors detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} has had {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} uncorrectable memory errors reported by EDAC.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostNetworkReceiveErrors
+    expr: rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host Network Receive Errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} interface {{ '{{' }} $labels.device {{ '}}' }} has encountered {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} receive errors in the last two minutes.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostNetworkTransmitErrors
+    expr: rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host Network Transmit Errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} interface {{ '{{' }} $labels.device {{ '}}' }} has encountered {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} transmit errors in the last two minutes.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostNetworkInterfaceSaturated
+    expr: (rate(node_network_receive_bytes_total{device!~"^tap.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*"} > 0.8
+    for: 1m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host Network Interface Saturated (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "The network interface \"{{ '{{' }} $labels.device {{ '}}' }}\" on \"{{ '{{' }} $labels.instance {{ '}}' }}\" is getting overloaded.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostConntrackLimit
+    expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host conntrack limit (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "The number of conntrack entries is approaching the limit\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostClockSkew
+    expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host clock skew (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Clock skew detected. Clock is out of sync.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: HostClockNotSynchronising
+    expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host clock not synchronising (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Clock not synchronising.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
new file mode 100644
index 00000000..6d84efa4
--- /dev/null
+++ b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
@@ -0,0 +1,183 @@
+---
+## https://awesome-prometheus-alerts.grep.to/rules#prometheus-self-monitoring
+prometheus_server_rules_prometheus_extra: []
+prometheus_server_rules_prometheus:
+  - alert: PrometheusJobMissing
+    expr: absent(up{job="prometheus"})
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus job missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "A Prometheus job has disappeared\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTargetMissing
+    expr: up == 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus target missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "A Prometheus target has disappeared. An exporter might have crashed.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusAllTargetsMissing
+    expr: count by (job) (up) == 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus all targets missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "A Prometheus job no longer has any living targets.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusConfigurationReloadFailure
+    expr: prometheus_config_last_reload_successful != 1
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus configuration reload failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus configuration reload error\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTooManyRestarts
+    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus too many restarts (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusRuleEvaluationFailures
+    expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus rule evaluation failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTemplateTextExpansionFailures
+    expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus template text expansion failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} template text expansion failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusRuleEvaluationSlow
+    expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus rule evaluation slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus rule evaluation took more time than the scheduled interval. This indicates slower storage backend access or an overly complex query.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusNotificationsBacklog
+    expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus notifications backlog (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTargetEmpty
+    expr: prometheus_sd_discovered_targets == 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus target empty (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus has no target in service discovery\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTargetScrapingSlow
+    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus target scraping slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus is scraping exporters slowly\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusLargeScrape
+    expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus large scrape (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTargetScrapeDuplicate
+    expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus target scrape duplicate (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbCheckpointCreationFailures
+    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB checkpoint creation failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} checkpoint creation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbCheckpointDeletionFailures
+    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB checkpoint deletion failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} checkpoint deletion failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbCompactionsFailed
+    expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB compactions failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB compaction failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbHeadTruncationsFailed
+    expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB head truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB head truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbReloadFailures
+    expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB reload failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB reload failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbWalCorruptions
+    expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB WAL corruptions (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL corruptions\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbWalTruncationsFailed
+    expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB WAL truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
diff --git a/roles/monitoring/prometheus/server/tasks/main.yml b/roles/monitoring/prometheus/server/tasks/main.yml
index 44f0800e..a70bd6fd 100644
--- a/roles/monitoring/prometheus/server/tasks/main.yml
+++ b/roles/monitoring/prometheus/server/tasks/main.yml
@@ -18,21 +18,6 @@
     name: prom-server
     state: present
 
-- name: create configuration directories
-  loop:
-  - jobs
-  - rules
-  - targets
-  file:
-    path: "/etc/prometheus/{{ item }}"
-    state: directory
-
-- name: create sub-directroy for all exporter types in jobs directory
-  loop: "{{ prometheus_server_jobs }}"
-  file:
-    path: "/etc/prometheus/jobs/{{ item }}"
-    state: directory
-
 - name: add user for server
   user:
     name: prometheus
@@ -50,6 +35,21 @@
 - name: create TLS CA and certificates
   import_tasks: tls.yml
 
+- name: create configuration directories
+  loop:
+  - jobs
+  - rules
+  - targets
+  file:
+    path: "/etc/prometheus/{{ item }}"
+    state: directory
+
+- name: create sub-directory for all exporter types in jobs directory
+  loop: "{{ prometheus_server_jobs }}"
+  file:
+    path: "/etc/prometheus/jobs/{{ item }}"
+    state: directory
+
 - name: generate targets config
   loop: "{{ prometheus_zone_targets }}"
   copy:
@@ -68,10 +68,19 @@
     path: "/etc/prometheus/jobs/{{ item.job }}/{{ item.target }}.yml"
     state: "{{ item.enabled | ternary('link', 'absent') }}"
 
+- name: generate rules files for all jobs
+  loop: "{{ prometheus_server_jobs | union(['prometheus']) }}"
+  template:
+    src: rules.yml.j2
+    dest: "/etc/prometheus/rules/{{ item }}.yml"
+    validate: "promtool check rules %s"
+  notify: reload prometheus
+
 - name: generate configuration file
   template:
     src: prometheus.yml.j2
    dest: /etc/prometheus/prometheus.yml
+    validate: "promtool check config %s"
   notify: reload prometheus
 
 - name: generate systemd service unit
diff --git a/roles/monitoring/prometheus/server/templates/rules.yml.j2 b/roles/monitoring/prometheus/server/templates/rules.yml.j2
new file mode 100644
index 00000000..30576363
--- /dev/null
+++ b/roles/monitoring/prometheus/server/templates/rules.yml.j2
@@ -0,0 +1,5 @@
+# {{ ansible_managed }}
+
+groups:
+  - name: {{ item }}
+    {{ {'rules': prometheus_server_rules[item]} | to_nice_yaml(indent=2, width=1337) | indent(4) }}
-- cgit v1.2.3
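
For illustration only: assuming a prometheus_server_rules dict that maps each job name to the matching prometheus_server_rules_* list (that mapping is not part of this patch), the rules.yml.j2 template above would render /etc/prometheus/rules/prometheus.yml roughly as sketched below. Exact key order, quoting and indentation are whatever to_nice_yaml produces, and only the first alert is shown.

# Ansible managed

groups:
  - name: prometheus
    rules:
    - alert: PrometheusJobMissing
      annotations:
        description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
        summary: Prometheus job missing (instance {{ $labels.instance }})
      expr: absent(up{job="prometheus"})
      for: 0m
      labels:
        severity: warning

Note how the {{ '{{' }} / {{ '}}' }} escapes in the defaults survive Ansible templating as literal {{ ... }}, which is what Prometheus and Alertmanager expect in annotations. Each rendered rules file is checked with promtool check rules (and the main config with promtool check config) through the tasks' validate option before it is installed, so a broken rule fails the play instead of breaking a running server on reload.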