Diffstat (limited to 'roles/monitoring/prometheus/server/defaults')
-rw-r--r--  roles/monitoring/prometheus/server/defaults/main.yml                      6
-rw-r--r--  roles/monitoring/prometheus/server/defaults/main/main.yml                20
-rw-r--r--  roles/monitoring/prometheus/server/defaults/main/rules_node.yml         219
-rw-r--r--  roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml   239
4 files changed, 478 insertions, 6 deletions
diff --git a/roles/monitoring/prometheus/server/defaults/main.yml b/roles/monitoring/prometheus/server/defaults/main.yml
deleted file mode 100644
index b5d13b5d..00000000
--- a/roles/monitoring/prometheus/server/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# prometheus_server_storage:
-# type: (zfs|lvm)
-# ...
-
-prometheus_server_retention: "15d"
diff --git a/roles/monitoring/prometheus/server/defaults/main/main.yml b/roles/monitoring/prometheus/server/defaults/main/main.yml
new file mode 100644
index 00000000..c9291172
--- /dev/null
+++ b/roles/monitoring/prometheus/server/defaults/main/main.yml
@@ -0,0 +1,20 @@
+---
+# prometheus_server_storage:
+# type: (zfs|lvm)
+# ...
+
+prometheus_server_retention: "15d"
+
+prometheus_server_jobs:
+ - node
+
+prometheus_server_rules:
+ prometheus: "{{ prometheus_server_rules_prometheus + ((prometheus_server_alertmanager is defined) | ternary(prometheus_server_rules_prometheus_alertmanager, [])) + prometheus_server_rules_prometheus_extra }}"
+  node: "{{ prometheus_server_rules_node + prometheus_server_rules_node_extra }}"
+
+# prometheus_server_alertmanager:
+# url: "127.0.0.1:9093"
+# path_prefix: /
+
+prometheus_server_web_listen_address: 127.0.0.1:9090
+# prometheus_server_web_external_url: /prometheus/
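
These defaults are meant to be overridden from inventory: defining the commented-out prometheus_server_alertmanager block makes the ternary above pull in the alertmanager rule group, and the *_extra lists take site-specific rules. A minimal host_vars sketch, with a hypothetical hostname and an illustrative extra rule that are not part of this change:

# host_vars/monitor.example.org/prometheus.yml (hypothetical)
prometheus_server_alertmanager:
  url: "127.0.0.1:9093"
  path_prefix: /

prometheus_server_rules_node_extra:
  - alert: HostRebootedRecently
    expr: (node_time_seconds - node_boot_time_seconds) < 600
    for: 0m
    labels:
      severity: info
    annotations:
      summary: Host rebooted recently (instance {{ '{{' }} $labels.instance {{ '}}' }})
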
diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_node.yml b/roles/monitoring/prometheus/server/defaults/main/rules_node.yml
new file mode 100644
index 00000000..ab7317ac
--- /dev/null
+++ b/roles/monitoring/prometheus/server/defaults/main/rules_node.yml
@@ -0,0 +1,219 @@
+---
+## https://awesome-prometheus-alerts.grep.to/rules#host-and-hardware
+prometheus_server_rules_node_extra: []
+prometheus_server_rules_node:
+ - alert: HostOutOfMemory
+ expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host out of memory (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Node memory is filling up (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostMemoryUnderMemoryPressure
+ expr: rate(node_vmstat_pgmajfault[1m]) > 1000
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host memory under memory pressure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostOutOfDiskSpace
+ expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host out of disk space (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Disk is almost full (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostDiskWillFillIn24Hours
+ expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host disk will fill in 24 hours (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostOutOfInodes
+    expr: node_filesystem_files_free{mountpoint="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host out of inodes (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostInodesWillFillIn24Hours
+    expr: node_filesystem_files_free{mountpoint="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{mountpoint="/rootfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host inodes will fill in 24 hours (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostUnusualDiskReadLatency
+ expr: rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host unusual disk read latency (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostUnusualDiskWriteLatency
+ expr: rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host unusual disk write latency (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostHighCpuLoad
+ expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host high CPU load (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "CPU load is > 80%\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostCpuStealNoisyNeighbor
+ expr: avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host CPU steal noisy neighbor (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "CPU steal is > 10%. A noisy neighbor is killing VM performance, or a spot instance may be out of credit.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostSystemdServiceCrashed
+ expr: node_systemd_unit_state{state="failed"} == 1
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host systemd service crashed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "systemd service crashed\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostPhysicalComponentTooHot
+ expr: node_hwmon_temp_celsius > 75
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host physical component too hot (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Physical hardware component too hot\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostNodeOvertemperatureAlarm
+ expr: node_hwmon_temp_crit_alarm_celsius == 1
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Host node overtemperature alarm (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Physical node temperature alarm triggered\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostRaidArrayGotInactive
+ expr: node_md_state{state="inactive"} > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Host RAID array got inactive (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "RAID array {{ '{{' }} $labels.device {{ '}}' }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostRaidDiskFailure
+ expr: node_md_disks{state="failed"} > 0
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host RAID disk failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "At least one device in the RAID array on {{ '{{' }} $labels.instance {{ '}}' }} has failed. Array {{ '{{' }} $labels.md_device {{ '}}' }} needs attention and possibly a disk swap.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostOomKillDetected
+ expr: increase(node_vmstat_oom_kill[1m]) > 0
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host OOM kill detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "OOM kill detected\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostEdacCorrectableErrorsDetected
+ expr: increase(node_edac_correctable_errors_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: info
+ annotations:
+ summary: Host EDAC Correctable Errors detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} has had {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} correctable memory errors reported by EDAC in the last minute.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostEdacUncorrectableErrorsDetected
+ expr: node_edac_uncorrectable_errors_total > 0
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host EDAC Uncorrectable Errors detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} has had {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} uncorrectable memory errors reported by EDAC.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostNetworkReceiveErrors
+ expr: rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host Network Receive Errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} interface {{ '{{' }} $labels.device {{ '}}' }} has encountered {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} receive errors in the last two minutes.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostNetworkTransmitErrors
+ expr: rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host Network Transmit Errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} interface {{ '{{' }} $labels.device {{ '}}' }} has encountered {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} transmit errors in the last two minutes.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostNetworkInterfaceSaturated
+ expr: (rate(node_network_receive_bytes_total{device!~"^tap.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*"} > 0.8
+ for: 1m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host Network Interface Saturated (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "The network interface \"{{ '{{' }} $labels.device {{ '}}' }}\" on \"{{ '{{' }} $labels.instance {{ '}}' }}\" is getting overloaded.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostConntrackLimit
+ expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host conntrack limit (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "The number of conntrack entries is approaching the limit.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostClockSkew
+ expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host clock skew (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Clock skew detected. Clock is out of sync.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: HostClockNotSynchronising
+ expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Host clock not synchronising (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Clock not synchronising.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
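
The {{ '{{' }} ... {{ '}}' }} sequences above keep Ansible's Jinja2 from consuming Prometheus' own Go templating. Once the role templates these defaults into a rule file, a rule should come out roughly like the following sketch of the rendered output (not taken from this commit):

- alert: HostOutOfMemory
  expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
  for: 2m
  labels:
    severity: warning
  annotations:
    summary: Host out of memory (instance {{ $labels.instance }})
    description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
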
diff --git a/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
new file mode 100644
index 00000000..8d4672b1
--- /dev/null
+++ b/roles/monitoring/prometheus/server/defaults/main/rules_prometheus.yml
@@ -0,0 +1,239 @@
+---
+## https://awesome-prometheus-alerts.grep.to/rules#prometheus-self-monitoring
+prometheus_server_rules_prometheus_extra: []
+prometheus_server_rules_prometheus:
+ - alert: PrometheusJobMissing
+ expr: absent(up{job="prometheus"})
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus job missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "A Prometheus job has disappeared\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTargetMissing
+ expr: up == 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus target missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "A Prometheus target has disappeared. An exporter might have crashed.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusAllTargetsMissing
+ expr: count by (job) (up) == 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus all targets missing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "A Prometheus job no longer has any living targets.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusConfigurationReloadFailure
+ expr: prometheus_config_last_reload_successful != 1
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus configuration reload failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus configuration reload error\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTooManyRestarts
+ expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus too many restarts (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusRuleEvaluationFailures
+ expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus rule evaluation failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTemplateTextExpansionFailures
+ expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus template text expansion failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} template text expansion failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusRuleEvaluationSlow
+ expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus rule evaluation slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus rule evaluation took more time than the scheduled interval. This indicates slower storage backend access or an overly complex query.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusNotificationsBacklog
+ expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus notifications backlog (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTargetEmpty
+ expr: prometheus_sd_discovered_targets == 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus target empty (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus has no target in service discovery\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTargetScrapingSlow
+ expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus target scraping slow (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus is scraping exporters slowly\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusLargeScrape
+ expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus large scrape (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTargetScrapeDuplicate
+ expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus target scrape duplicate (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbCheckpointCreationFailures
+ expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB checkpoint creation failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} checkpoint creation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbCheckpointDeletionFailures
+ expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB checkpoint deletion failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} checkpoint deletion failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbCompactionsFailed
+ expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB compactions failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB compaction failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbHeadTruncationsFailed
+ expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB head truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB head truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbReloadFailures
+ expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB reload failures (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB reload failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbWalCorruptions
+ expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB WAL corruptions (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL corruptions\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusTsdbWalTruncationsFailed
+ expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus TSDB WAL truncations failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus encountered {{ '{{' }} $value {{ '}}' }} TSDB WAL truncation failures\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+
+prometheus_server_rules_prometheus_alertmanager:
+ - alert: PrometheusAlertmanagerConfigurationReloadFailure
+ expr: alertmanager_config_last_reload_successful != 1
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus AlertManager configuration reload failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "AlertManager configuration reload error\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusAlertmanagerConfigNotSynced
+ expr: count(count_values("config_hash", alertmanager_config_hash)) > 1
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ summary: Prometheus AlertManager config not synced (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusAlertmanagerE2eDeadManSwitch
+ expr: vector(1)
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus AlertManager E2E dead man switch (instance {{ '{{' }} $labels.instance {{ '}}' }})
+ description: "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusNotConnectedToAlertmanager
+ expr: prometheus_notifications_alertmanagers_discovered < 1
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus not connected to alertmanager (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
+
+ - alert: PrometheusAlertmanagerNotificationFailing
+ expr: rate(alertmanager_notifications_failed_total[1m]) > 0
+ for: 0m
+ labels:
+ severity: critical
+ annotations:
+ summary: Prometheus AlertManager notification failing (instance {{ '{{' }} $labels.instance {{ '}}' }})
+      description: "Alertmanager is failing to send notifications\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
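
Since broken rule files only surface when Prometheus (re)loads them, the rendered files can be checked with promtool first. A hypothetical validation task (the rule file paths and task wording are illustrative, not part of this role; promtool check rules is the standard CLI):

# hypothetical validation task, not part of this role
- name: validate rendered prometheus rule files
  ansible.builtin.command: promtool check rules /etc/prometheus/rules/node.yml /etc/prometheus/rules/prometheus.yml
  changed_when: false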