From 1a40395d35db76e1482bc32fb7a97e6a60c4b1dc Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Mon, 21 Jun 2021 00:23:51 +0200
Subject: prometheus: initial support for alert rules

---
 .../server/defaults/main/rules_prometheus.yml | 183 ++++++++++++++++++++++
 1 file changed, 183 insertions(+)
 create mode 100644 roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml

diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
new file mode 100644
index 00000000..6d84efa4
--- /dev/null
+++ b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
@@ -0,0 +1,183 @@
+---
+## https://awesome-prometheus-alerts.grep.to/rules#prometheus-self-monitoring
+prometheus_server_rules_prometheus_extra: []
+prometheus_server_rules_prometheus:
+  - alert: PrometheusJobMissing
+    expr: absent(up{job="prometheus"})
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus job missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "A Prometheus job has disappeared\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTargetMissing
+    expr: up == 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus target missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "A Prometheus target has disappeared. An exporter might have crashed.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusAllTargetsMissing
+    expr: count by (job) (up) == 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus all targets missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "A Prometheus job does not have any living targets anymore.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusConfigurationReloadFailure
+    expr: prometheus_config_last_reload_successful != 1
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus configuration reload failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus configuration reload error\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTooManyRestarts
+    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus too many restarts (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusRuleEvaluationFailures
+    expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus rule evaluation failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTemplateTextExpansionFailures
+    expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus template text expansion failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} template text expansion failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusRuleEvaluationSlow
+    expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus rule evaluation slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus rule evaluation took longer than the scheduled interval. This indicates slower storage backend access or a query that is too complex.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusNotificationsBacklog
+    expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus notifications backlog (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTargetEmpty
+    expr: prometheus_sd_discovered_targets == 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus target empty (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus has no target in service discovery\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTargetScrapingSlow
+    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus target scraping slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus is scraping exporters slowly\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusLargeScrape
+    expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus large scrape (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTargetScrapeDuplicate
+    expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus target scrape duplicate (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbCheckpointCreationFailures
+    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB checkpoint creation failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} checkpoint creation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbCheckpointDeletionFailures
+    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB checkpoint deletion failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} checkpoint deletion failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbCompactionsFailed
+    expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB compactions failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB compaction failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbHeadTruncationsFailed
+    expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB head truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB head truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbReloadFailures
+    expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB reload failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB reload failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbWalCorruptions
+    expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB WAL corruptions (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL corruptions\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusTsdbWalTruncationsFailed
+    expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB WAL truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
-- 
cgit v1.2.3
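
Because these rule definitions live in Ansible defaults, every literal Prometheus template expression has to be escaped for Jinja2: {{ '{{' }} renders to a literal {{ and {{ '}}' }} to a literal }}, so $labels and $value are expanded by Prometheus at alert time rather than by Ansible at deploy time. As a rough sketch of the end result (the role's rule-file template is not shown in these patches, so the file path and group layout here are assumptions based on the /etc/prometheus/rules/*.yml glob used in the second commit), the first default would render to something like:

# Hypothetical rendered rule file, e.g. /etc/prometheus/rules/prometheus.yml;
# the groups wrapper is assumed, only the brace un-escaping is certain.
groups:
  - name: prometheus
    rules:
      - alert: PrometheusJobMissing
        expr: absent(up{job="prometheus"})
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus job missing (instance {{ $labels.instance }})
          description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
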
From 6cf380956bdd31292b4ccf51b1bbc217b93bf45f Mon Sep 17 00:00:00 2001
From: Christian Pointner
Date: Wed, 23 Jun 2021 23:06:40 +0200
Subject: prometheus: connect server to alertmanager if configured

---
 inventory/host_vars/ch-mon.yml                     |  5 ++-
 .../prometheus/server/defaults/main/main.yml       |  5 ++-
 .../server/defaults/main/rules_prometheus.yml      | 47 ++++++++++++++++++++++
 .../prometheus/server/templates/prometheus.yml.j2  | 13 ++++++
 4 files changed, 68 insertions(+), 2 deletions(-)

diff --git a/inventory/host_vars/ch-mon.yml b/inventory/host_vars/ch-mon.yml
index c0551768..111ffb55 100644
--- a/inventory/host_vars/ch-mon.yml
+++ b/inventory/host_vars/ch-mon.yml
@@ -61,6 +61,10 @@ prometheus_server_storage:
     size: 30G
     fs: ext4
 
+prometheus_server_alertmanager:
+  url: "127.0.0.1:9093"
+
+
 prometheus_exporters_extra:
   - blackbox
 
@@ -73,7 +77,6 @@ promethues_alertmanager_smtp:
   from: "noreply@chaos-at-home.org"
   require_tls: no
 
-
 grafana_secret_key: "{{ vault_grafana_secret_key }}"
 
 grafana_config_smtp:
diff --git a/roles/monitoring/prometheus/server/defaults/main/main.yml b/roles/monitoring/prometheus/server/defaults/main/main.yml
index b10d6f17..8e7fea4b 100644
--- a/roles/monitoring/prometheus/server/defaults/main/main.yml
+++ b/roles/monitoring/prometheus/server/defaults/main/main.yml
@@ -9,5 +9,8 @@ prometheus_server_jobs:
   - node
 
 prometheus_server_rules:
-  prometheus: "{{ prometheus_server_rules_prometheus + prometheus_server_rules_prometheus_extra }}"
+  prometheus: "{{ prometheus_server_rules_prometheus + ((prometheus_server_alertmanager is defined) | ternary(prometheus_server_rules_prometheus_alertmanager, [])) + prometheus_server_rules_prometheus_extra }}"
   node: "{{ prometheus_server_rules_node + prometheus_server_rules_prometheus_extra }}"
+
+# prometheus_server_alertmanager:
+#   url: "127.0.0.1:9093"
diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
index 6d84efa4..8d4672b1 100644
--- a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
+++ b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
@@ -181,3 +181,50 @@ prometheus_server_rules_prometheus:
     annotations:
       summary: Prometheus TSDB WAL truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
       description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+
+prometheus_server_rules_prometheus_alertmanager:
+  - alert: PrometheusAlertmanagerConfigurationReloadFailure
+    expr: alertmanager_config_last_reload_successful != 1
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus AlertManager configuration reload failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "AlertManager configuration reload error\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusAlertmanagerConfigNotSynced
+    expr: count(count_values("config_hash", alertmanager_config_hash)) > 1
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus AlertManager config not synced (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusAlertmanagerE2eDeadManSwitch
+    expr: vector(1)
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus AlertManager E2E dead man switch (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusNotConnectedToAlertmanager
+    expr: prometheus_notifications_alertmanagers_discovered < 1
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus not connected to alertmanager (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+  - alert: PrometheusAlertmanagerNotificationFailing
+    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus AlertManager notification failing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Alertmanager is failing to send notifications\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
diff --git a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2 b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
index 3975c74d..c76990f4 100644
--- a/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
+++ b/roles/monitoring/prometheus/server/templates/prometheus.yml.j2
@@ -6,6 +6,13 @@ global:
 
 rule_files:
   - /etc/prometheus/rules/*.yml
+{% if prometheus_server_alertmanager is defined %}
+
+alerting:
+  alertmanagers:
+    - static_configs:
+        - targets: ['{{ prometheus_server_alertmanager.url }}']
+{% endif %}
 
 scrape_configs:
   - job_name: 'prometheus'
@@ -13,6 +20,12 @@ scrape_configs:
     - targets: ['localhost:9090']
       labels:
        instance: "{{ inventory_hostname }}"
+{% if prometheus_server_alertmanager is defined %}
+
+  - job_name: 'alertmanager'
+    static_configs:
+      - targets: ['{{ prometheus_server_alertmanager.url }}']
+{% endif %}
 
 {% for job in prometheus_server_jobs %}
   - job_name: '{{ job }}'
-- 
cgit v1.2.3
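
With the ch-mon host vars above (prometheus_server_alertmanager.url set to "127.0.0.1:9093"), the template from the second commit renders roughly as follows. This is a sketch: the global section and the per-job scrape configs produced by the prometheus_server_jobs loop are omitted, and inventory_hostname being "ch-mon" is an assumption taken from the host_vars file name.

# Sketch of the rendered /etc/prometheus/prometheus.yml (abridged)
rule_files:
  - /etc/prometheus/rules/*.yml

alerting:
  alertmanagers:
    - static_configs:
        - targets: ['127.0.0.1:9093']

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
    - targets: ['localhost:9090']
      labels:
        instance: "ch-mon"   # assumed inventory_hostname

  - job_name: 'alertmanager'
    static_configs:
      - targets: ['127.0.0.1:9093']

Note that the alertmanager scrape job sets no explicit instance label, so its time series will carry the default instance="127.0.0.1:9093" that Prometheus derives from the target address, while the prometheus job overrides it with the inventory hostname.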