author    Christian Pointner <equinox@spreadspace.org>  2021-07-25 11:24:47 +0200
committer Christian Pointner <equinox@spreadspace.org>  2021-07-25 11:24:47 +0200
commit    83a9137a8450f5e8ec1ae828b3e3e3554b76b013 (patch)
tree      701077b59e758fd9145f068e9f66dcf088ee62fd /roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
parent    ele-router: prepare setup for e21 (diff)
parent    prometheus exporter: add TODO list (diff)
Merge branch 'topic/prometheus'
Diffstat (limited to 'roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml')
-rw-r--r--  roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml  |  239
1 file changed, 239 insertions, 0 deletions
diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
new file mode 100644
index 00000000..8d4672b1
--- /dev/null
+++ b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
@@ -0,0 +1,239 @@
+---
+## https://awesome-prometheus-alerts.grep.to/rules#prometheus-self-monitoring
+prometheus_server_rules_prometheus_extra: []
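+## NOTE on quoting: the "{{ '{{' }}" / "{{ '}}' }}" constructs below escape
+## the braces from Jinja2, so Ansible writes literal Prometheus template
+## expressions (e.g. {{ $labels.instance }}) into the generated rules file
+## instead of trying to expand $labels/$value itself.
+## Deployments can append site-specific rules via the *_extra list above;
+## the role's rules template presumably concatenates it with the defaults.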
+prometheus_server_rules_prometheus:
+ - alert: PrometheusJobMissing
+ expr: absent(up{job="prometheus"})
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus job missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "A Prometheus job has disappeared\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTargetMissing
+ expr: up == 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus target missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "A Prometheus target has disappeared. An exporter might be crashed.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusAllTargetsMissing
+ expr: count by (job) (up) == 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus all targets missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "A Prometheus job does not have living target anymore.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusConfigurationReloadFailure
+ expr: prometheus_config_last_reload_successful != 1
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus configuration reload failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus configuration reload error\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTooManyRestarts
+ expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus too many restarts (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusRuleEvaluationFailures
+ expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus rule evaluation failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTemplateTextExpansionFailures
+ expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus template text expansion failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} template text expansion failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusRuleEvaluationSlow
+ expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus rule evaluation slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusNotificationsBacklog
+ expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus notifications backlog (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTargetEmpty
+ expr: prometheus_sd_discovered_targets == 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus target empty (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus has no target in service discovery\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTargetScrapingSlow
+ expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus target scraping slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus is scraping exporters slowly\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusLargeScrape
+ expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus large scrape (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTargetScrapeDuplicate
+ expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus target scrape duplicate (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbCheckpointCreationFailures
+ expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB checkpoint creation failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} checkpoint creation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbCheckpointDeletionFailures
+ expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB checkpoint deletion failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} checkpoint deletion failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbCompactionsFailed
+ expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB compactions failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB compactions failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbHeadTruncationsFailed
+ expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB head truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB head truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbReloadFailures
+ expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB reload failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB reload failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbWalCorruptions
+ expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB WAL corruptions (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL corruptions\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbWalTruncationsFailed
+ expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB WAL truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+
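+## Alertmanager self-monitoring alerts, from the same rule collection as
+## above; presumably these are only deployed when an Alertmanager is part
+## of the setup.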
+prometheus_server_rules_prometheus_alertmanager:
+ - alert: PrometheusAlertmanagerConfigurationReloadFailure
+ expr: alertmanager_config_last_reload_successful != 1
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus AlertManager configuration reload failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "AlertManager configuration reload error\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusAlertmanagerConfigNotSynced
+ expr: count(count_values("config_hash", alertmanager_config_hash)) > 1
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus AlertManager config not synced (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusAlertmanagerE2eDeadManSwitch
+ expr: vector(1)
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+      summary: Prometheus AlertManager E2E dead man's switch (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusNotConnectedToAlertmanager
+ expr: prometheus_notifications_alertmanagers_discovered < 1
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus not connected to alertmanager (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus cannot connect the alertmanager\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusAlertmanagerNotificationFailing
+ expr: rate(alertmanager_notifications_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus AlertManager notification failing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Alertmanager is failing sending notifications\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"