Rules

Selfmonitoring (last evaluation 12.473s ago, evaluation time 429us)

Rule | State | Error | Last Evaluation | Evaluation Time
alert: SelfMonitoringAlwaysFiring expr: minute() >= 0 for: 1s labels: application: leonard_healthchecks severity: info ok 12.488s ago 381.1us
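
Because minute() >= 0 is always true, this rule fires permanently and can act as a heartbeat / dead-man's-switch for the alerting pipeline. A minimal sketch of the same rule in rules-file form (the group and file layout are assumptions inferred from the row above; expr, for, and labels are copied verbatim):

  groups:
    - name: Selfmonitoring
      rules:
        - alert: SelfMonitoringAlwaysFiring
          # heartbeat rule: the expression is always true, so the alert always fires
          expr: minute() >= 0
          for: 1s
          labels:
            application: leonard_healthchecks
            severity: info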

lowpref (last evaluation 3.665s ago, evaluation time 309.6us)

Rule | State | Error | Last Evaluation | Evaluation Time
alert: LowGatewayPreference expr: gw_loadbalancing_pref{segment="1"} < 10 for: 1d labels: severity: page annotations: summary: | {{ .Labels.gateway }} has low gateway preference ({{ .Value }}) ok 3.665s ago 290.4us
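
Reconstructed into rules-file YAML, this group's single rule would look roughly as follows (the layout is an assumption; the expression, duration, and templated summary come from the row above):

  groups:
    - name: lowpref
      rules:
        - alert: LowGatewayPreference
          # pages only after a full day below the preference threshold of 10
          expr: gw_loadbalancing_pref{segment="1"} < 10
          for: 1d
          labels:
            severity: page
          annotations:
            summary: |
              {{ .Labels.gateway }} has low gateway preference ({{ .Value }})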

BlackboxExporter (last evaluation 12.788s ago, evaluation time 2.429ms)

Rule | State | Error | Last Evaluation | Evaluation Time
alert: BlackboxProbeFailed expr: probe_success == 0 for: 15m labels: severity: critical annotations: description: |- Probe failed VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox probe failed (instance {{ $labels.instance }}) ok 12.788s ago 393.8us
alert: BlackboxConfigurationReloadFailure expr: blackbox_exporter_config_last_reload_successful != 1 labels: severity: warning annotations: description: |- Blackbox configuration reload failure VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox configuration reload failure (instance {{ $labels.instance }}) ok 12.788s ago 87.16us
alert: BlackboxSslCertificateWillExpireSoon expr: 3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20 labels: severity: warning annotations: description: |- SSL certificate expires in less than 20 days VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }}) ok 12.788s ago 518.6us
alert: BlackboxSslCertificateWillExpireSoon expr: 0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3 labels: severity: critical annotations: description: |- SSL certificate expires in less than 3 days VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }}) ok 12.788s ago 406.5us
alert: BlackboxSslCertificateExpired expr: round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0 labels: severity: critical annotations: description: |- SSL certificate has expired already VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox SSL certificate expired (instance {{ $labels.instance }}) ok 12.787s ago 349.1us
alert: BlackboxProbeSlowHttp expr: avg_over_time(probe_http_duration_seconds[1m]) > 1 for: 1m labels: severity: warning annotations: description: |- HTTP request took more than 1s VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox probe slow HTTP (instance {{ $labels.instance }}) ok 12.787s ago 441.9us
alert: BlackboxProbeSlowPing expr: avg_over_time(probe_icmp_duration_seconds[1m]) > 1 for: 1m labels: severity: warning annotations: description: |- Blackbox ping took more than 1s VALUE = {{ $value }} LABELS = {{ $labels }} summary: Blackbox probe slow ping (instance {{ $labels.instance }}) ok 12.787s ago 154us
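
The two certificate-expiry rules bound the same round(...) expression from both sides to bucket the remaining certificate lifetime, in days, into a warning band (at least 3 but under 20 days) and a critical band (0 to under 3 days). A sketch of the warning-level rule as rules-file YAML (layout assumed, content copied from the table):

  groups:
    - name: BlackboxExporter
      rules:
        - alert: BlackboxSslCertificateWillExpireSoon
          # days until expiry = (cert expiry timestamp - now) / 86400, rounded to 0.1
          expr: 3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20
          labels:
            severity: warning
          annotations:
            description: |-
              SSL certificate expires in less than 20 days
                VALUE = {{ $value }}
                LABELS = {{ $labels }}
            summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})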

general (last evaluation 10.948s ago, evaluation time 1.929ms)

Rule | State | Error | Last Evaluation | Evaluation Time
alert: UP_FAILED expr: up{ignore_down!="1"} < 1 for: 15m labels: application: prometheus severity: warning annotations: summary: Scrapes not functional ok 10.948s ago 1.229ms
alert: PROMETHEUS_RELOAD_FAILED expr: prometheus_config_last_reload_successful < 1 for: 1m labels: application: prometheus severity: warning annotations: summary: Reload of prometheus config failed ok 10.947s ago 105.8us
alert: ALERTMANAGER_RELOAD_FAILED expr: alertmanager_config_last_reload_successful < 1 for: 1m labels: application: prometheus severity: warning annotations: summary: Reload of alertmanager config failed ok 10.947s ago 103.8us
alert: PROBE_FAILED_TCP expr: probe_success < 1 for: 15m labels: severity: warning annotations: summary: Blackbox probe failed ok 10.947s ago 310.9us
alert: AlertmanagerClusterPeers expr: alertmanager_cluster_members < 2 for: 15m labels: severity: warning annotations: summary: Alertmanager cluster has too few members ok 10.947s ago 108.8us
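
A sketch of the first rule of this group in rules-file form (group layout assumed; note the ignore_down label, which lets individual scrape targets opt out of the check):

  groups:
    - name: general
      rules:
        - alert: UP_FAILED
          # targets labelled ignore_down="1" are excluded from this alert
          expr: up{ignore_down!="1"} < 1
          for: 15m
          labels:
            application: prometheus
            severity: warning
          annotations:
            summary: Scrapes not functional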

NodeExporter (last evaluation 12.207s ago, evaluation time 219.2ms)

Rule | State | Error | Last Evaluation | Evaluation Time
alert: OsVersionUnknown expr: up{job="node"} unless on (instance) node_os_version{job="node"} for: 1s labels: severity: audit annotations: description: Os-Version could not be determined for {{ $labels.instance }}; this is expected behaviour for Debian < 12, so it is at least Debian unstable summary: Os-Version could not be determined for {{ $labels.instance }} ok 12.207s ago 1.399ms
alert: HostOutOfMemory expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Node memory is filling up (< 10% left) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host out of memory (instance {{ $labels.instance }}) ok 12.206s ago 2.019ms
alert: HostMemoryUnderMemoryPressure expr: (rate(node_vmstat_pgmajfault[1m]) > 2000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 30m labels: severity: warning annotations: description: |- The node is under heavy memory pressure. High rate of major page faults VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host memory under memory pressure (instance {{ $labels.instance }}) ok 12.204s ago 761.2us
alert: HostOutOfDiskSpace expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Disk is almost full (< 10% left) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host out of disk space (instance {{ $labels.instance }}) ok 12.204s ago 6.308ms
alert: HostDiskWillFillIn24Hours expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and on (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Filesystem is predicted to run out of space within the next 24 hours at current write rate VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host disk will fill in 24 hours (instance {{ $labels.instance }}) ok 12.197s ago 14.21ms
alert: HostOutOfInodes expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Disk is almost running out of available inodes (< 10% left) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host out of inodes (instance {{ $labels.instance }}) ok 12.183s ago 5.923ms
alert: HostFilesystemDeviceError expr: node_filesystem_device_error == 1 for: 2m labels: severity: critical annotations: description: |- {{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host filesystem device error (instance {{ $labels.instance }}) ok 12.178s ago 1.427ms
alert: HostInodesWillFillIn24Hours expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and on (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Filesystem is predicted to run out of inodes within the next 24 hours at current write rate VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }}) ok 12.177s ago 14.54ms
alert: HostUnusualDiskReadLatency expr: (rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Disk latency is growing (read operations > 100ms) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host unusual disk read latency (instance {{ $labels.instance }}) ok 12.162s ago 2.193ms
alert: HostUnusualDiskWriteLatency expr: (rate(node_disk_write_time_seconds_total{nodename!="gw05n02"}[1m]) / rate(node_disk_writes_completed_total{nodename!="gw05n02"}[1m]) > 0.1 and rate(node_disk_writes_completed_total{nodename!="gw05n02"}[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename!="gw05n02"} for: 5m labels: severity: warning annotations: description: |- Disk latency is growing (write operations > 100ms) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host unusual disk write latency (instance {{ $labels.instance }}) ok 12.16s ago 2.236ms
alert: HostCpuStealNoisyNeighbor expr: (avg by (instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 10m labels: severity: warning annotations: description: |- CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }}) ok 12.158s ago 15.07ms
alert: HostUnusualDiskIo expr: (rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on (instance) group_left (nodename) node_uname_info{nodename!="gw05n02"} for: 15m labels: severity: warning annotations: description: |- Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host unusual disk IO (instance {{ $labels.instance }}) ok 12.143s ago 1.262ms
alert: HostSwapIsFillingUp expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Swap is filling up (>80%) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host swap is filling up (instance {{ $labels.instance }}) ok 12.142s ago 1.592ms
alert: HostSystemdServiceCrashed expr: (node_systemd_unit_state{state="failed"} == 1) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 10m labels: severity: warning annotations: description: |- systemd service crashed VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host systemd service crashed (instance {{ $labels.instance }}) ok 12.141s ago 56.4ms
alert: CpuTooHot expr: ((node_hwmon_temp_celsius * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{chip=~"pci0000:00_0000:00:18_3",label!="tctl"} > 98)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 5m labels: severity: warning annotations: description: |- Physical hardware component too hot VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host physical component too hot (instance {{ $labels.instance }}) ok 12.085s ago 1.151ms
alert: HostPhysicalComponentTooHot expr: ((node_hwmon_temp_celsius * ignoring (label) group_left (instance, job, node, sensor) node_hwmon_sensor_label{chip!="pci0000:00_0000:00:18_3",label!="tctl"} > 75)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 5m labels: severity: warning annotations: description: |- Physical hardware component too hot VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host physical component too hot (instance {{ $labels.instance }}) ok 12.084s ago 1.05ms
alert: HostNodeOvertemperatureAlarm expr: ((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: critical annotations: description: |- Physical node temperature alarm triggered VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host node overtemperature alarm (instance {{ $labels.instance }}) ok 12.083s ago 698.3us
alert: HostRaidArrayGotInactive expr: (node_md_state{state="inactive"} > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: critical annotations: description: |- RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host RAID array got inactive (instance {{ $labels.instance }}) ok 12.082s ago 578.3us
alert: HostRaidDiskFailure expr: (node_md_disks{state="failed"} > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host RAID disk failure (instance {{ $labels.instance }}) ok 12.082s ago 600.3us
alert: HostKernelVersionDeviations expr: (count(sum by (kernel) (label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*"))) > 1) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 6h labels: severity: warning annotations: description: |- Different kernel versions are running VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host kernel version deviations (instance {{ $labels.instance }}) ok 12.081s ago 1.189ms
alert: HostOomKillDetected expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: warning annotations: description: |- OOM kill detected VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host OOM kill detected (instance {{ $labels.instance }}) ok 12.08s ago 776.2us
alert: HostEdacCorrectableErrorsDetected expr: (increase(node_edac_correctable_errors_total[1m]) > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: info annotations: description: |- Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last 5 minutes. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }}) ok 12.08s ago 1.009ms
alert: HostEdacUncorrectableErrorsDetected expr: (node_edac_uncorrectable_errors_total > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} labels: severity: warning annotations: description: |- Host {{ $labels.instance }} has had {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }}) ok 12.079s ago 919.6us
alert: HostNetworkReceiveErrors expr: (rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host Network Receive Errors (instance {{ $labels.instance }}) ok 12.078s ago 26.61ms
alert: HostNetworkTransmitErrors expr: (rate(node_network_transmit_errs_total{device!~"^g09n03abbtesta|^g09n03amobrtra|^g09n03bbbtestb"}[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 5m labels: severity: warning annotations: description: |- Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host Network Transmit Errors (instance {{ $labels.instance }}) ok 12.052s ago 26.81ms
alert: HostNetworkInterfaceSaturated expr: ((rate(node_network_receive_bytes_total{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"}[1m])) / node_network_speed_bytes{device!~"^bb.*|^tap.*|^vnet.*|^veth.*|^tun.*|^vp.*"} > 0.8 < 10000) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 15m labels: severity: warning annotations: description: |- The network interface "{{ $labels.device }}" on "{{ $labels.instance }}" is getting overloaded. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host Network Interface Saturated (instance {{ $labels.instance }}) ok 12.025s ago 22.96ms
alert: HostNetworkBondDegraded expr: ((node_bonding_active - node_bonding_slaves) != 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Bond "{{ $labels.device }}" degraded on "{{ $labels.instance }}". VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host Network Bond Degraded (instance {{ $labels.instance }}) ok 12.002s ago 682us
alert: HostConntrackLimit expr: (node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 5m labels: severity: warning annotations: description: |- The number of conntrack is approaching limit VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host conntrack limit (instance {{ $labels.instance }}) ok 12.002s ago 1.591ms
alert: HostClockSkew expr: ((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 10m labels: severity: warning annotations: description: |- Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host clock skew (instance {{ $labels.instance }}) ok 12s ago 2.767ms
alert: HostClockNotSynchronising expr: (min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 2m labels: severity: warning annotations: description: |- Clock not synchronising. Ensure NTP is configured on this host. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host clock not synchronising (instance {{ $labels.instance }}) ok 11.998s ago 1.633ms
alert: HostRequiresReboot expr: (node_reboot_required > 0) * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} for: 4h labels: severity: info annotations: description: |- {{ $labels.instance }} requires a reboot. VALUE = {{ $value }} LABELS = {{ $labels }} summary: Host requires reboot (instance {{ $labels.instance }}) ok 11.996s ago 2.695ms
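
Almost every rule in this group multiplies its expression with * on (instance) group_left (nodename) node_uname_info{nodename=~".+"} so the human-readable nodename label is carried into the resulting alert. A sketch of the first memory rule in rules-file form (layout assumed, expression copied from the table):

  groups:
    - name: NodeExporter
      rules:
        - alert: HostOutOfMemory
          # the join with node_uname_info only serves to attach the nodename label
          expr: >-
            (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10)
            * on (instance) group_left (nodename) node_uname_info{nodename=~".+"}
          for: 2m
          labels:
            severity: warning
          annotations:
            description: |-
              Node memory is filling up (< 10% left)
                VALUE = {{ $value }}
                LABELS = {{ $labels }}
            summary: Host out of memory (instance {{ $labels.instance }})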

postfix_smtp_status_deferred (last evaluation 13.728s ago, evaluation time 300.3us)

Rule | State | Error | Last Evaluation | Evaluation Time
alert: Mail stuck in queue expr: postfix_showq_message_age_seconds_count > 0 for: 1h10m labels: application: mail severity: warning annotations: summary: Mail on {{ $labels.instance }} has mails stuck in queue ok 13.728s ago 281.6us
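
In rules-file form this single rule would look roughly as follows (layout assumed; an alert name containing spaces presumably relies on Prometheus's UTF-8 name support, since the legacy naming scheme restricts alert names to metric-name characters):

  groups:
    - name: postfix_smtp_status_deferred
      rules:
        - alert: Mail stuck in queue
          # fires once the postfix queue has been non-empty for more than 1h10m
          expr: postfix_showq_message_age_seconds_count > 0
          for: 1h10m
          labels:
            application: mail
            severity: warning
          annotations:
            summary: Mail on {{ $labels.instance }} has mails stuck in queue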

pve-guest-alerts (last evaluation 9.331s ago, evaluation time 3.99ms)

Rule | State | Error | Last Evaluation | Evaluation Time
alert: GuestRunningWithoutOnboot expr: (pve_up{id=~".+"} == 1) * on (id) group_left () (pve_onboot_status{id=~".+"} or on (id) vector(0)) != 1 for: 10m labels: severity: info annotations: description: Guest {{ $labels.id }} is running but does not have onboot set. Check whether this is intended. summary: Guest {{ $labels.id }} is running without onboot ok 9.331s ago 2.358ms
alert: GuestNotRunningButOnboot expr: (pve_up{id=~".+"} == 0) * on (id) group_left () (pve_onboot_status{id=~".+"} == 1) for: 10m labels: severity: critical annotations: description: Guest {{ $labels.id }} has onboot=1 set but is not running. It should start automatically. summary: Guest {{ $labels.id }} is not running, but onboot=1 is set ok 9.329s ago 1.607ms
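
The two rules are mirror images: one flags running guests without onboot set, the other flags stopped guests that are expected to auto-start. A sketch of the critical one in rules-file form (layout assumed, content from the row above):

  groups:
    - name: pve-guest-alerts
      rules:
        - alert: GuestNotRunningButOnboot
          # guest is down (pve_up == 0) although onboot is enabled
          expr: (pve_up{id=~".+"} == 0) * on (id) group_left () (pve_onboot_status{id=~".+"} == 1)
          for: 10m
          labels:
            severity: critical
          annotations:
            description: Guest {{ $labels.id }} has onboot=1 set but is not running. It should start automatically.
            summary: Guest {{ $labels.id }} is not running, but onboot=1 is set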

SmartctlExporter (last evaluation 8.935s ago, evaluation time 831.5us)

Rule | State | Error | Last Evaluation | Evaluation Time
alert: SmartDeviceTemperatureWarning expr: smartctl_device_temperature > 60 for: 2m labels: severity: warning annotations: description: |- Device temperature warning (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart device temperature warning (instance {{ $labels.instance }}) ok 8.935s ago 236.9us
alert: SmartDeviceTemperatureCritical expr: smartctl_device_temperature > 80 for: 2m labels: severity: critical annotations: description: |- Device temperature critical (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart device temperature critical (instance {{ $labels.instance }}) ok 8.935s ago 100.9us
alert: SmartCriticalWarning expr: smartctl_device_critical_warning > 0 for: 15m labels: severity: critical annotations: description: |- device has critical warning (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart critical warning (instance {{ $labels.instance }}) ok 8.935s ago 68.31us
alert: SmartMediaErrors expr: smartctl_device_media_errors > 0 for: 15m labels: severity: critical annotations: description: |- device has media errors (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart media errors (instance {{ $labels.instance }}) ok 8.935s ago 65.08us
alert: SmartNvmeWearoutIndicator expr: smartctl_device_available_spare{device=~"nvme.*"} < smartctl_device_available_spare_threshold{device=~"nvme.*"} for: 15m labels: severity: critical annotations: description: |- NVMe device is wearing out (instance {{ $labels.instance }}) VALUE = {{ $value }} LABELS = {{ $labels }} summary: Smart NVME Wearout Indicator (instance {{ $labels.instance }}) ok 8.935s ago 323.9us
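
As a final example, the NVMe wear-out rule reconstructed as rules-file YAML (layout assumed; it compares the device's remaining spare capacity against the drive's own reported threshold):

  groups:
    - name: SmartctlExporter
      rules:
        - alert: SmartNvmeWearoutIndicator
          # available spare has dropped below the drive-reported threshold
          expr: smartctl_device_available_spare{device=~"nvme.*"} < smartctl_device_available_spare_threshold{device=~"nvme.*"}
          for: 15m
          labels:
            severity: critical
          annotations:
            description: |-
              NVMe device is wearing out (instance {{ $labels.instance }})
                VALUE = {{ $value }}
                LABELS = {{ $labels }}
            summary: Smart NVME Wearout Indicator (instance {{ $labels.instance }})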