---
## https://awesome-prometheus-alerts.grep.to/rules#host-and-hardware

prometheus_server_rules_node_extra: []

prometheus_server_rules_node:

  - alert: HostOutOfMemory
    expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host out of memory (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Node memory is filling up (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostMemoryUnderMemoryPressure
    expr: rate(node_vmstat_pgmajfault[1m]) > 1000
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host memory under memory pressure (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostMemoryHardwareCorrupted
    expr: node_memory_HardwareCorrupted_bytes > 0
    labels:
      severity: warning
    annotations:
      summary: Host memory is corrupted (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "The node reports {{ '{{' }} $value {{ '}}' }} bytes of corrupted memory.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostOutOfDiskSpace
    expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host out of disk space (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Disk is almost full (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostDiskWillFillIn24Hours
    expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host disk will fill in 24 hours (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostOutOfInodes
    expr: node_filesystem_files_free{mountpoint="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host out of inodes (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostInodesWillFillIn24Hours
    expr: node_filesystem_files_free{mountpoint="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{mountpoint="/rootfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host inodes will fill in 24 hours (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
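  # The latency expressions below divide total I/O time by completed operations,
  # i.e. average seconds per operation; the threshold of 0.2 corresponds to 200 ms.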
  - alert: HostUnusualDiskReadLatency
    expr: rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.2 and rate(node_disk_reads_completed_total[1m]) > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: Host unusual disk read latency (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Disk latency is growing (read operations > 200ms)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostUnusualDiskWriteLatency
    expr: rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.2 and rate(node_disk_writes_completed_total[1m]) > 0
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: Host unusual disk write latency (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Disk latency is growing (write operations > 200ms)\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostHighCpuLoad
    expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
    for: 15m
    labels:
      severity: warning
    annotations:
      summary: Host high CPU load (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "CPU load is > 80%\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostCpuStealNoisyNeighbor
    expr: avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host CPU steal noisy neighbor (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "CPU steal is > 10%. A noisy neighbor is killing VM performance or a spot instance may be out of credit.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostSystemdNotRunning
    expr: node_systemd_system_running == 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host systemd is not in running state (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "systemd is not in running state.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostSystemdServiceCrashed
    expr: node_systemd_unit_state{state="failed"} == 1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host systemd service crashed (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "The systemd service unit {{ '{{' }} $labels.name {{ '}}' }} is in a failed state.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostPhysicalComponentTooHot
    expr: node_hwmon_temp_celsius > 85
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: Host physical component too hot (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Physical hardware component too hot\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostNodeOvertemperatureAlarm
    expr: node_hwmon_temp_crit_alarm_celsius == 1
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Host node overtemperature alarm (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Physical node temperature alarm triggered\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
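  # The node_md_* metrics below describe Linux software RAID (mdraid) state
  # as read from /proc/mdstat by node_exporter's mdadm collector.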
  - alert: HostRaidArrayGotInactive
    expr: node_md_state{state="inactive"} > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Host RAID array got inactive (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "RAID array {{ '{{' }} $labels.device {{ '}}' }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostRaidDiskFailure
    expr: node_md_disks{state="failed"} > 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host RAID disk failure (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "At least one device in the RAID array on {{ '{{' }} $labels.instance {{ '}}' }} failed. Array {{ '{{' }} $labels.md_device {{ '}}' }} needs attention and possibly a disk swap.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostOomKillDetected
    expr: increase(node_vmstat_oom_kill[1m]) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host OOM kill detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "OOM kill detected\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostEdacCorrectableErrorsDetected
    expr: increase(node_edac_correctable_errors_total[1m]) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host EDAC Correctable Errors detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} has had {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} correctable memory errors reported by EDAC in the last minute.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostEdacUncorrectableErrorsDetected
    expr: node_edac_uncorrectable_errors_total > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Host EDAC Uncorrectable Errors detected (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} has had {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} uncorrectable memory errors reported by EDAC.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostNetworkReceiveErrors
    expr: rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host Network Receive Errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} interface {{ '{{' }} $labels.device {{ '}}' }} has encountered {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} receive errors in the last two minutes.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostNetworkTransmitErrors
    expr: rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host Network Transmit Errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} interface {{ '{{' }} $labels.device {{ '}}' }} has encountered {{ '{{' }} printf \"%.0f\" $value {{ '}}' }} transmit errors in the last two minutes.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostNetworkInterfaceSaturated
    expr: (rate(node_network_receive_bytes_total{device!~"^(tap|vnet).*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^(tap|vnet).*"}[1m])) / node_network_speed_bytes{device!~"^(tap|vnet).*"} > 0.95
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: Host Network Interface Saturated (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "The network interface \"{{ '{{' }} $labels.device {{ '}}' }}\" on \"{{ '{{' }} $labels.instance {{ '}}' }}\" is getting overloaded.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
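  # node_nf_conntrack_* metrics are typically only exported while the nf_conntrack
  # kernel module is loaded; on hosts without it the rule below returns no data.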
}}\" is getting overloaded.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: HostConntrackLimit expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8 for: 5m labels: severity: warning annotations: summary: Host conntrack limit (instance {{ '{{' }} $labels.instance {{ '}}' }}) description: "The number of conntrack is approching limit\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: HostNetworkBondDegraded expr: (node_bonding_active - node_bonding_slaves) != 0 for: 1m labels: severity: warning annotations: title: Bond is degraded on (instance {{ '{{' }} $labels.instance {{ '}}' }}) description: "Bond \"{{ '{{' }} $labels.master {{ '}}' }}\" on \"{{ '{{' }} $labels.instance {{ '}}' }}\" is degraded\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: HostClockSkew expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0) for: 2m labels: severity: warning annotations: summary: Host clock skew (instance {{ '{{' }} $labels.instance {{ '}}' }}) description: "Clock skew detected. Clock is out of sync.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: HostClockNotSynchronising expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16 for: 2m labels: severity: warning annotations: summary: Host clock not synchronising (instance {{ '{{' }} $labels.instance {{ '}}' }}) description: "Clock not synchronising.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: ZpoolStateDegraded expr: node_zfs_zpool_state{state="degraded"} == 1 for: 0m labels: severity: warning annotations: summary: ZFS zpool is degraded (instance {{ '{{' }} $labels.instance {{ '}}' }}) description: "The ZFS zpool {{ '{{' }} $labels.zpool {{ '}}' }} is degraded.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: ZpoolStateFaulted expr: node_zfs_zpool_state{state="faulted"} == 1 for: 0m labels: severity: critical annotations: summary: ZFS zpool is faulted (instance {{ '{{' }} $labels.instance {{ '}}' }}) description: "The ZFS zpool {{ '{{' }} $labels.zpool {{ '}}' }} is faulted.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" ### textfile-collectors - alert: NodeTextfileScrapeFailed expr: node_textfile_scrape_error != 0 for: 0m labels: severity: warning annotations: summary: Scraping metrics from textfiles failed (instance {{ '{{' }} $labels.instance {{ '}}' }}) description: "The textfile collector failed to scrape at least one file\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" - alert: NodeTextfileMetricsOutdated expr: (time() - node_textfile_mtime_seconds{file!~".*/(git-fsck-|syncoid-pull-).*.prom"}) > 3600 for: 0m labels: severity: warning annotations: summary: Metrics from a textfile collector are too old (instance {{ '{{' }} $labels.instance {{ '}}' }}) description: "The exported values from textfile {{ '{{' }} $labels.file {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }} are {{ '{{' }} $value {{ '}}' }} seconds old.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}" ## textfile-collector: apt - alert: AptUpgradesPending expr: sum by (instance) (apt_upgrades_pending) > 0 for: 0m labels: severity: warning annotations: summary: Host has 
  - alert: AptUpgradesPending
    expr: sum by (instance) (apt_upgrades_pending) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host has upgradable packages (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} has {{ '{{' }} $value {{ '}}' }} upgradable packages.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: AptAutoremovePending
    expr: sum by (instance) (apt_autoremove_pending) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host has packages that can be autoremoved (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} has {{ '{{' }} $value {{ '}}' }} packages that can be autoremoved.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: HostNeedsRebooting
    expr: node_reboot_required > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host must be rebooted (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "Host {{ '{{' }} $labels.instance {{ '}}' }} must be rebooted for security updates to take effect.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  ## textfile-collector: deleted-libraries

  - alert: DeletedLibraryInUse
    expr: node_processes_linking_deleted_libraries > 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Some processes still use a deleted library (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "The deleted library {{ '{{' }} $labels.library_name {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }} is still in use by {{ '{{' }} $value {{ '}}' }} processes.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  ## textfile-collector: smartmon

  - alert: SmartmonDeviceUnhealthy
    expr: smartmon_device_smart_healthy == 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host disk is unhealthy (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "S.M.A.R.T. reports unhealthy device {{ '{{' }} $labels.device {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }}.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: SmartmonDeviceErrors
    expr: smartmon_device_errors > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host disk reports S.M.A.R.T. errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "S.M.A.R.T. reports errors for disk {{ '{{' }} $labels.device {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }}.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  # selection of S.M.A.R.T. metrics based on: https://www.backblaze.com/blog/hard-drive-smart-stats/
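  # The delta(...[72h]) expressions flag recent growth of these lifetime counters;
  # the plain threshold checks catch drives that are already past a fixed value.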
  - alert: SmartmonReallocatedSectorsCount
    expr: delta(smartmon_attr_raw_value{name="reallocated_sector_ct"}[72h]) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host disk S.M.A.R.T. reports reallocated sectors (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "S.M.A.R.T. reports reallocated sectors within the last 72 hours for disk {{ '{{' }} $labels.device {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }}; the drive might be failing.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: SmartmonReallocatedSectorsCountHigh
    expr: smartmon_attr_raw_value{name="reallocated_sector_ct"} > 100
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host disk S.M.A.R.T. reports more than 100 reallocated sectors (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "S.M.A.R.T. reports more than 100 reallocated sectors for disk {{ '{{' }} $labels.device {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }}; the drive might be failing.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: SmartmonReportedUncorrectableErrors
    expr: delta(smartmon_attr_raw_value{name="reported_uncorrect"}[72h]) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host disk S.M.A.R.T. reports uncorrectable errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "S.M.A.R.T. reports uncorrectable errors within the last 72 hours for disk {{ '{{' }} $labels.device {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }}; the drive might be failing.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: SmartmonCommandTimeouts
    expr: delta(smartmon_attr_raw_value{name="command_timeout"}[72h]) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host disk S.M.A.R.T. reports command timeouts (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "S.M.A.R.T. reports command timeouts within the last 72 hours for disk {{ '{{' }} $labels.device {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }}; the drive might be failing.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: SmartmonCurrentPendingSectors
    expr: smartmon_attr_raw_value{name="current_pending_sector"} > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host disk S.M.A.R.T. reports current pending sectors (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "S.M.A.R.T. reports current pending sectors for disk {{ '{{' }} $labels.device {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }}; the drive might be failing.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: SmartmonOfflineUncorrectable
    expr: smartmon_attr_raw_value{name="offline_uncorrectable"} > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Host disk S.M.A.R.T. reports offline uncorrectable errors (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "S.M.A.R.T. reports offline uncorrectable errors for disk {{ '{{' }} $labels.device {{ '}}' }} on host {{ '{{' }} $labels.instance {{ '}}' }}; the drive might be failing.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  ## syncoid-pull scripts

  - alert: SyncoidPullJobTooLongAgo
    expr: time() - syncoid_pull_run > (24 * 3600)
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: The last syncoid pull job was too long ago (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "The last syncoid-based backup job of {{ '{{' }} $labels.instance {{ '}}' }} from {{ '{{' }} $labels.backup_server {{ '}}' }} ran more than 24 hours ago.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"

  - alert: SyncoidPullJobFailed
    expr: syncoid_pull_exit_code != 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: The last syncoid pull job failed (instance {{ '{{' }} $labels.instance {{ '}}' }})
      description: "The last syncoid-based backup job of {{ '{{' }} $labels.instance {{ '}}' }} from {{ '{{' }} $labels.backup_server {{ '}}' }} has failed.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"
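# A minimal sketch of how host- or group-specific rules could be added without
# editing the list above, assuming the role appends prometheus_server_rules_node_extra
# to prometheus_server_rules_node when rendering the rule file (the alert name and
# threshold below are purely illustrative):
#
# prometheus_server_rules_node_extra:
#   - alert: HostHighLoad15
#     expr: node_load15 > 10
#     for: 15m
#     labels:
#       severity: warning
#     annotations:
#       summary: Host 15-minute load average is high (instance {{ '{{' }} $labels.instance {{ '}}' }})
#       description: "15-minute load average has been above 10 for 15 minutes.\n VALUE = {{ '{{' }} $value {{ '}}' }}\n LABELS = {{ '{{' }} $labels {{ '}}' }}"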