# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 2.9373e-05 go_gc_duration_seconds{quantile="0.25"} 3.7128e-05 go_gc_duration_seconds{quantile="0.5"} 4.2671e-05 go_gc_duration_seconds{quantile="0.75"} 5.542e-05 go_gc_duration_seconds{quantile="1"} 0.000167936 go_gc_duration_seconds_sum 2.466059509 go_gc_duration_seconds_count 35813 # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines 7 # HELP go_info Information about the Go environment. # TYPE go_info gauge go_info{version="go1.20.4"} 1 # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge go_memstats_alloc_bytes 2.360168e+06 # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. # TYPE go_memstats_alloc_bytes_total counter go_memstats_alloc_bytes_total 2.1602083536e+10 # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. # TYPE go_memstats_buck_hash_sys_bytes gauge go_memstats_buck_hash_sys_bytes 1.881136e+06 # HELP go_memstats_frees_total Total number of frees. # TYPE go_memstats_frees_total counter go_memstats_frees_total 2.06949457e+08 # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. # TYPE go_memstats_gc_sys_bytes gauge go_memstats_gc_sys_bytes 8.451392e+06 # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. # TYPE go_memstats_heap_alloc_bytes gauge go_memstats_heap_alloc_bytes 2.360168e+06 # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. # TYPE go_memstats_heap_idle_bytes gauge go_memstats_heap_idle_bytes 4.096e+06 # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. # TYPE go_memstats_heap_inuse_bytes gauge go_memstats_heap_inuse_bytes 3.833856e+06 # HELP go_memstats_heap_objects Number of allocated objects. # TYPE go_memstats_heap_objects gauge go_memstats_heap_objects 16364 # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. # TYPE go_memstats_heap_released_bytes gauge go_memstats_heap_released_bytes 2.736128e+06 # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. # TYPE go_memstats_heap_sys_bytes gauge go_memstats_heap_sys_bytes 7.929856e+06 # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge go_memstats_last_gc_time_seconds 1.7349252248880072e+09 # HELP go_memstats_lookups_total Total number of pointer lookups. # TYPE go_memstats_lookups_total counter go_memstats_lookups_total 0 # HELP go_memstats_mallocs_total Total number of mallocs. # TYPE go_memstats_mallocs_total counter go_memstats_mallocs_total 2.06965821e+08 # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. # TYPE go_memstats_mcache_inuse_bytes gauge go_memstats_mcache_inuse_bytes 1200 # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. # TYPE go_memstats_mcache_sys_bytes gauge go_memstats_mcache_sys_bytes 15600 # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. # TYPE go_memstats_mspan_inuse_bytes gauge go_memstats_mspan_inuse_bytes 66560 # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. 
# TYPE go_memstats_mspan_sys_bytes gauge go_memstats_mspan_sys_bytes 81600 # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. # TYPE go_memstats_next_gc_bytes gauge go_memstats_next_gc_bytes 4.988792e+06 # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. # TYPE go_memstats_other_sys_bytes gauge go_memstats_other_sys_bytes 708584 # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. # TYPE go_memstats_stack_inuse_bytes gauge go_memstats_stack_inuse_bytes 458752 # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. # TYPE go_memstats_stack_sys_bytes gauge go_memstats_stack_sys_bytes 458752 # HELP go_memstats_sys_bytes Number of bytes obtained from system. # TYPE go_memstats_sys_bytes gauge go_memstats_sys_bytes 1.952692e+07 # HELP go_threads Number of OS threads created. # TYPE go_threads gauge go_threads 5 # HELP node_arp_entries ARP entries by device # TYPE node_arp_entries gauge node_arp_entries{device="eth0"} 1 # HELP node_boot_time_seconds Node boot time, in unixtime. # TYPE node_boot_time_seconds gauge node_boot_time_seconds 1.730267585e+09 # HELP node_context_switches_total Total number of context switches. # TYPE node_context_switches_total counter node_context_switches_total 3.4347397198e+10 # HELP node_cooling_device_cur_state Current throttle state of the cooling device # TYPE node_cooling_device_cur_state gauge node_cooling_device_cur_state{name="0",type="Processor"} 0 node_cooling_device_cur_state{name="1",type="Processor"} 0 node_cooling_device_cur_state{name="2",type="Processor"} 0 node_cooling_device_cur_state{name="3",type="Processor"} 0 # HELP node_cooling_device_max_state Maximum throttle state of the cooling device # TYPE node_cooling_device_max_state gauge node_cooling_device_max_state{name="0",type="Processor"} 0 node_cooling_device_max_state{name="1",type="Processor"} 0 node_cooling_device_max_state{name="2",type="Processor"} 0 node_cooling_device_max_state{name="3",type="Processor"} 0 # HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. # TYPE node_cpu_guest_seconds_total counter node_cpu_guest_seconds_total{cpu="0",mode="nice"} 0 node_cpu_guest_seconds_total{cpu="0",mode="user"} 0 node_cpu_guest_seconds_total{cpu="1",mode="nice"} 0 node_cpu_guest_seconds_total{cpu="1",mode="user"} 0 node_cpu_guest_seconds_total{cpu="2",mode="nice"} 0 node_cpu_guest_seconds_total{cpu="2",mode="user"} 0 node_cpu_guest_seconds_total{cpu="3",mode="nice"} 0 node_cpu_guest_seconds_total{cpu="3",mode="user"} 0 # HELP node_cpu_seconds_total Seconds the CPUs spent in each mode. 
# TYPE node_cpu_seconds_total counter node_cpu_seconds_total{cpu="0",mode="idle"} 4.36394514e+06 node_cpu_seconds_total{cpu="0",mode="iowait"} 570.75 node_cpu_seconds_total{cpu="0",mode="irq"} 0 node_cpu_seconds_total{cpu="0",mode="nice"} 2.58 node_cpu_seconds_total{cpu="0",mode="softirq"} 22910.6 node_cpu_seconds_total{cpu="0",mode="steal"} 27451.13 node_cpu_seconds_total{cpu="0",mode="system"} 44482.23 node_cpu_seconds_total{cpu="0",mode="user"} 97026.74 node_cpu_seconds_total{cpu="1",mode="idle"} 4.20363551e+06 node_cpu_seconds_total{cpu="1",mode="iowait"} 615.78 node_cpu_seconds_total{cpu="1",mode="irq"} 0 node_cpu_seconds_total{cpu="1",mode="nice"} 3.24 node_cpu_seconds_total{cpu="1",mode="softirq"} 98470.91 node_cpu_seconds_total{cpu="1",mode="steal"} 37251.75 node_cpu_seconds_total{cpu="1",mode="system"} 32235.75 node_cpu_seconds_total{cpu="1",mode="user"} 69752.23 node_cpu_seconds_total{cpu="2",mode="idle"} 4.33864693e+06 node_cpu_seconds_total{cpu="2",mode="iowait"} 584.96 node_cpu_seconds_total{cpu="2",mode="irq"} 0 node_cpu_seconds_total{cpu="2",mode="nice"} 2.83 node_cpu_seconds_total{cpu="2",mode="softirq"} 7408.34 node_cpu_seconds_total{cpu="2",mode="steal"} 30293.42 node_cpu_seconds_total{cpu="2",mode="system"} 45610.16 node_cpu_seconds_total{cpu="2",mode="user"} 101382.41 node_cpu_seconds_total{cpu="3",mode="idle"} 4.36041461e+06 node_cpu_seconds_total{cpu="3",mode="iowait"} 573.26 node_cpu_seconds_total{cpu="3",mode="irq"} 0 node_cpu_seconds_total{cpu="3",mode="nice"} 3.75 node_cpu_seconds_total{cpu="3",mode="softirq"} 2487.38 node_cpu_seconds_total{cpu="3",mode="steal"} 27260.76 node_cpu_seconds_total{cpu="3",mode="system"} 44886.76 node_cpu_seconds_total{cpu="3",mode="user"} 98515.59 # HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. # TYPE node_disk_discard_time_seconds_total counter node_disk_discard_time_seconds_total{device="vda"} 2.584 node_disk_discard_time_seconds_total{device="vdb"} 0 # HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. # TYPE node_disk_discarded_sectors_total counter node_disk_discarded_sectors_total{device="vda"} 3.26251596e+08 node_disk_discarded_sectors_total{device="vdb"} 0 # HELP node_disk_discards_completed_total The total number of discards completed successfully. # TYPE node_disk_discards_completed_total counter node_disk_discards_completed_total{device="vda"} 16123 node_disk_discards_completed_total{device="vdb"} 0 # HELP node_disk_discards_merged_total The total number of discards merged. # TYPE node_disk_discards_merged_total counter node_disk_discards_merged_total{device="vda"} 0 node_disk_discards_merged_total{device="vdb"} 0 # HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. # TYPE node_disk_flush_requests_time_seconds_total counter node_disk_flush_requests_time_seconds_total{device="vda"} 1471.422 node_disk_flush_requests_time_seconds_total{device="vdb"} 0 # HELP node_disk_flush_requests_total The total number of flush requests completed successfully # TYPE node_disk_flush_requests_total counter node_disk_flush_requests_total{device="vda"} 2.0792e+06 node_disk_flush_requests_total{device="vdb"} 0 # HELP node_disk_info Info of /sys/block/. 
# TYPE node_disk_info gauge node_disk_info{device="vda",major="254",minor="0",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="vdb",major="254",minor="16",model="",path="",revision="",serial="",wwn=""} 1 # HELP node_disk_io_now The number of I/Os currently in progress. # TYPE node_disk_io_now gauge node_disk_io_now{device="vda"} 0 node_disk_io_now{device="vdb"} 0 # HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. # TYPE node_disk_io_time_seconds_total counter node_disk_io_time_seconds_total{device="vda"} 10361.752 node_disk_io_time_seconds_total{device="vdb"} 0.056 # HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. # TYPE node_disk_io_time_weighted_seconds_total counter node_disk_io_time_weighted_seconds_total{device="vda"} 8082.564 node_disk_io_time_weighted_seconds_total{device="vdb"} 0.009000000000000001 # HELP node_disk_read_bytes_total The total number of bytes read successfully. # TYPE node_disk_read_bytes_total counter node_disk_read_bytes_total{device="vda"} 5.79365376e+08 node_disk_read_bytes_total{device="vdb"} 403456 # HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. # TYPE node_disk_read_time_seconds_total counter node_disk_read_time_seconds_total{device="vda"} 4.179 node_disk_read_time_seconds_total{device="vdb"} 0.009000000000000001 # HELP node_disk_reads_completed_total The total number of reads completed successfully. # TYPE node_disk_reads_completed_total counter node_disk_reads_completed_total{device="vda"} 16142 node_disk_reads_completed_total{device="vdb"} 105 # HELP node_disk_reads_merged_total The total number of reads merged. # TYPE node_disk_reads_merged_total counter node_disk_reads_merged_total{device="vda"} 2362 node_disk_reads_merged_total{device="vdb"} 0 # HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. # TYPE node_disk_write_time_seconds_total counter node_disk_write_time_seconds_total{device="vda"} 6604.376 node_disk_write_time_seconds_total{device="vdb"} 0 # HELP node_disk_writes_completed_total The total number of writes completed successfully. # TYPE node_disk_writes_completed_total counter node_disk_writes_completed_total{device="vda"} 4.710915e+06 node_disk_writes_completed_total{device="vdb"} 0 # HELP node_disk_writes_merged_total The number of writes merged. # TYPE node_disk_writes_merged_total counter node_disk_writes_merged_total{device="vda"} 3.205159e+06 node_disk_writes_merged_total{device="vdb"} 0 # HELP node_disk_written_bytes_total The total number of bytes written successfully. # TYPE node_disk_written_bytes_total counter node_disk_written_bytes_total{device="vda"} 4.0521360896e+10 node_disk_written_bytes_total{device="vdb"} 0 # HELP node_dmi_info A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, product_sku, product_uuid, product_version, system_vendor if provided by DMI. 
# TYPE node_dmi_info gauge node_dmi_info{bios_date="12/12/2017",bios_release="0.0",bios_vendor="DigitalOcean",bios_version="20171212",board_asset_tag="454839219",board_name="Droplet",board_vendor="DigitalOcean",board_version="20171212",chassis_asset_tag="",chassis_vendor="QEMU",chassis_version="pc-i440fx-6.1",product_family="DigitalOcean_Droplet",product_name="Droplet",product_sku="",product_version="20171212",system_vendor="DigitalOcean"} 1 # HELP node_entropy_available_bits Bits of available entropy. # TYPE node_entropy_available_bits gauge node_entropy_available_bits 256 # HELP node_entropy_pool_size_bits Bits of entropy pool. # TYPE node_entropy_pool_size_bits gauge node_entropy_pool_size_bits 256 # HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. # TYPE node_exporter_build_info gauge node_exporter_build_info{branch="HEAD",goarch="amd64",goos="linux",goversion="go1.20.4",revision="ff7f9d69b645cb691dd3e84dc3afc88f5c006962",tags="netgo osusergo static_build",version="1.6.0"} 1 # HELP node_filefd_allocated File descriptor statistics: allocated. # TYPE node_filefd_allocated gauge node_filefd_allocated 5568 # HELP node_filefd_maximum File descriptor statistics: maximum. # TYPE node_filefd_maximum gauge node_filefd_maximum 9.223372036854776e+18 # HELP node_filesystem_avail_bytes Filesystem space available to non-root users in bytes. # TYPE node_filesystem_avail_bytes gauge node_filesystem_avail_bytes{device="/dev/vda1",fstype="ext4",mountpoint="/"} 1.53244491776e+11 # HELP node_filesystem_device_error Whether an error occurred while getting statistics for the given device. # TYPE node_filesystem_device_error gauge node_filesystem_device_error{device="/dev/vda1",fstype="ext4",mountpoint="/"} 0 node_filesystem_device_error{device="/dev/vda1",fstype="ext4",mountpoint="/var/lib/kubelet/pods/a6ca48b8-f8cd-457d-b095-37c33ab803c5/volume-subpaths/hubble-ui-nginx-conf/frontend/0"} 1 node_filesystem_device_error{device="/dev/vda15",fstype="vfat",mountpoint="/boot/efi"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/0a3fb62c115873e7dfbfe166d0ccd4cb106fae647777900dd7791f99a5d9bcde/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/0feb02b41cd517ded44ffc0a60ffa055e833ee03c51e9862e4008df5a103634f/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/224d767616dcacc059cb58b807843679f49066992779224dee9d023d5e9c37b3/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/27d7d5040c15df0113b5ff06a87071c65faea9f6654fd64497302f1e9083ebe0/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/3947e0a87a4f59ac64741fc6353de33fe619685b541ca8dde98b22e100a05a21/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/42655a60b81b43fe7304ff71c2210005d52dd1e2bd268b9717018740a36cefef/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/433194bd024b38a1faa6dfdffcc2152af64548da8ae8b4315af20a1164a882d9/shm"} 1 
node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/4f972baa8700865cbc70fe9f5f78a30266b1f26f82ef59f99789845b1a547b68/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/59c4237351e128d1c24381d8add7f7171fc0efff79fedeb6bd83d5d09366bac5/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/5d91cd52bd44cf456f6a0c0afd5c58be14087d1c68e3a2fe51bbd62a6e44a760/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/7074d9ef45b9fd2f9f068fbca6a7417e6be50bf52121f2679c994a2ab9214039/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/9292b8eba1fb7506949e0b6f8b4341fcce6f6d00bd0f5c974a70112e2b0e5a65/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/b8446a5237263b6f714152f0e03285f2980c0fff233fc6793cb4602980ebcfa5/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/bae2d964f67e166f3add53a8fb5f29e892cdd9bcb1b5b51d97f0ed08faa310db/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/db94e355d308ca35509e4ab1f0be2604632c65f52bf309ca52e1ef8bcf29f553/shm"} 1 node_filesystem_device_error{device="shm",fstype="tmpfs",mountpoint="/run/containerd/io.containerd.grpc.v1.cri/sandboxes/dbd662a6c84f4599ecb69511424f38d0affe5367d891862657afc25f2011c897/shm"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/run"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/run/lock"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/27e64b74-8ff3-44a3-a53d-b8924f5211c1/volumes/kubernetes.io~projected/kube-api-access-nvvh8"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/375824b7-25a0-4533-92c4-a60f28d00995/volumes/kubernetes.io~projected/kube-api-access-p5rsk"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/381735d9-ef4d-4ba4-ba9f-48b91f26d5c2/volumes/kubernetes.io~projected/kube-api-access-jqzbd"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/458ac8e2-9f3d-4371-ae8c-4bcf57e214cd/volumes/kubernetes.io~projected/kube-api-access-zcwrw"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/560eeb81-2571-4ab2-a76c-16a9b007ecb8/volumes/kubernetes.io~projected/clustermesh-secrets"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/560eeb81-2571-4ab2-a76c-16a9b007ecb8/volumes/kubernetes.io~projected/hubble-tls"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/560eeb81-2571-4ab2-a76c-16a9b007ecb8/volumes/kubernetes.io~projected/kube-api-access-xfdxd"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/9c295138-87fc-413a-b7f1-b6e3af26607d/volumes/kubernetes.io~projected/kube-api-access-gdhdb"} 1 
node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/a001319e-585f-48ca-a9fc-b553debd28dc/volumes/kubernetes.io~projected/kube-api-access-phv6z"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/a6ca48b8-f8cd-457d-b095-37c33ab803c5/volumes/kubernetes.io~projected/kube-api-access-xlrrk"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/b112dab9-aa92-4f7c-bf56-508c0db60335/volumes/kubernetes.io~projected/kube-api-access-tb2w6"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/ce803231-c9c6-4cb6-8d76-60d631059cba/volumes/kubernetes.io~projected/konnectivity-agent-token"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/ce803231-c9c6-4cb6-8d76-60d631059cba/volumes/kubernetes.io~projected/kube-api-access-dq4wt"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/ddde6959-c5d7-4f93-8674-b58d824c523e/volumes/kubernetes.io~projected/kube-api-access-n99qw"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/e3c18ffc-942c-48e4-8234-78e445799410/volumes/kubernetes.io~projected/tls"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/ef021fba-c97c-4136-be28-3ee22704d81c/volumes/kubernetes.io~projected/kube-api-access-dglx5"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/fe8e2492-d9ea-4749-9036-6a5e2ff30681/volumes/kubernetes.io~projected/kube-api-access-sncn8"} 1 node_filesystem_device_error{device="tmpfs",fstype="tmpfs",mountpoint="/var/lib/kubelet/pods/fe8e2492-d9ea-4749-9036-6a5e2ff30681/volumes/kubernetes.io~secret/kube-proxy-kubeconfig"} 1 # HELP node_filesystem_files Filesystem total file nodes. # TYPE node_filesystem_files gauge node_filesystem_files{device="/dev/vda1",fstype="ext4",mountpoint="/"} 1.0477568e+07 # HELP node_filesystem_files_free Filesystem total free file nodes. # TYPE node_filesystem_files_free gauge node_filesystem_files_free{device="/dev/vda1",fstype="ext4",mountpoint="/"} 1.0147494e+07 # HELP node_filesystem_free_bytes Filesystem free space in bytes. # TYPE node_filesystem_free_bytes gauge node_filesystem_free_bytes{device="/dev/vda1",fstype="ext4",mountpoint="/"} 1.60147750912e+11 # HELP node_filesystem_readonly Filesystem read-only status. # TYPE node_filesystem_readonly gauge node_filesystem_readonly{device="/dev/vda1",fstype="ext4",mountpoint="/"} 0 # HELP node_filesystem_size_bytes Filesystem size in bytes. # TYPE node_filesystem_size_bytes gauge node_filesystem_size_bytes{device="/dev/vda1",fstype="ext4",mountpoint="/"} 1.68923357184e+11 # HELP node_forks_total Total number of forks. # TYPE node_forks_total counter node_forks_total 1.246629e+06 # HELP node_intr_total Total number of interrupts serviced. # TYPE node_intr_total counter node_intr_total 2.9071294883e+10 # HELP node_load1 1m load average. # TYPE node_load1 gauge node_load1 0.06 # HELP node_load15 15m load average. # TYPE node_load15 gauge node_load15 0.17 # HELP node_load5 5m load average. # TYPE node_load5 gauge node_load5 0.18 # HELP node_memory_Active_anon_bytes Memory information field Active_anon_bytes. # TYPE node_memory_Active_anon_bytes gauge node_memory_Active_anon_bytes 7.053312e+06 # HELP node_memory_Active_bytes Memory information field Active_bytes. 
# TYPE node_memory_Active_bytes gauge node_memory_Active_bytes 8.93136896e+08 # HELP node_memory_Active_file_bytes Memory information field Active_file_bytes. # TYPE node_memory_Active_file_bytes gauge node_memory_Active_file_bytes 8.86083584e+08 # HELP node_memory_AnonHugePages_bytes Memory information field AnonHugePages_bytes. # TYPE node_memory_AnonHugePages_bytes gauge node_memory_AnonHugePages_bytes 5.20093696e+08 # HELP node_memory_AnonPages_bytes Memory information field AnonPages_bytes. # TYPE node_memory_AnonPages_bytes gauge node_memory_AnonPages_bytes 1.273294848e+09 # HELP node_memory_Bounce_bytes Memory information field Bounce_bytes. # TYPE node_memory_Bounce_bytes gauge node_memory_Bounce_bytes 0 # HELP node_memory_Buffers_bytes Memory information field Buffers_bytes. # TYPE node_memory_Buffers_bytes gauge node_memory_Buffers_bytes 3.02419968e+08 # HELP node_memory_Cached_bytes Memory information field Cached_bytes. # TYPE node_memory_Cached_bytes gauge node_memory_Cached_bytes 5.188243456e+09 # HELP node_memory_CommitLimit_bytes Memory information field CommitLimit_bytes. # TYPE node_memory_CommitLimit_bytes gauge node_memory_CommitLimit_bytes 4.163231744e+09 # HELP node_memory_Committed_AS_bytes Memory information field Committed_AS_bytes. # TYPE node_memory_Committed_AS_bytes gauge node_memory_Committed_AS_bytes 3.20704512e+09 # HELP node_memory_DirectMap2M_bytes Memory information field DirectMap2M_bytes. # TYPE node_memory_DirectMap2M_bytes gauge node_memory_DirectMap2M_bytes 8.453619712e+09 # HELP node_memory_DirectMap4k_bytes Memory information field DirectMap4k_bytes. # TYPE node_memory_DirectMap4k_bytes gauge node_memory_DirectMap4k_bytes 1.36163328e+08 # HELP node_memory_Dirty_bytes Memory information field Dirty_bytes. # TYPE node_memory_Dirty_bytes gauge node_memory_Dirty_bytes 20480 # HELP node_memory_FileHugePages_bytes Memory information field FileHugePages_bytes. # TYPE node_memory_FileHugePages_bytes gauge node_memory_FileHugePages_bytes 0 # HELP node_memory_FilePmdMapped_bytes Memory information field FilePmdMapped_bytes. # TYPE node_memory_FilePmdMapped_bytes gauge node_memory_FilePmdMapped_bytes 0 # HELP node_memory_HardwareCorrupted_bytes Memory information field HardwareCorrupted_bytes. # TYPE node_memory_HardwareCorrupted_bytes gauge node_memory_HardwareCorrupted_bytes 0 # HELP node_memory_HugePages_Free Memory information field HugePages_Free. # TYPE node_memory_HugePages_Free gauge node_memory_HugePages_Free 0 # HELP node_memory_HugePages_Rsvd Memory information field HugePages_Rsvd. # TYPE node_memory_HugePages_Rsvd gauge node_memory_HugePages_Rsvd 0 # HELP node_memory_HugePages_Surp Memory information field HugePages_Surp. # TYPE node_memory_HugePages_Surp gauge node_memory_HugePages_Surp 0 # HELP node_memory_HugePages_Total Memory information field HugePages_Total. # TYPE node_memory_HugePages_Total gauge node_memory_HugePages_Total 0 # HELP node_memory_Hugepagesize_bytes Memory information field Hugepagesize_bytes. # TYPE node_memory_Hugepagesize_bytes gauge node_memory_Hugepagesize_bytes 2.097152e+06 # HELP node_memory_Hugetlb_bytes Memory information field Hugetlb_bytes. # TYPE node_memory_Hugetlb_bytes gauge node_memory_Hugetlb_bytes 0 # HELP node_memory_Inactive_anon_bytes Memory information field Inactive_anon_bytes. # TYPE node_memory_Inactive_anon_bytes gauge node_memory_Inactive_anon_bytes 1.366945792e+09 # HELP node_memory_Inactive_bytes Memory information field Inactive_bytes. 
# TYPE node_memory_Inactive_bytes gauge node_memory_Inactive_bytes 5.953425408e+09 # HELP node_memory_Inactive_file_bytes Memory information field Inactive_file_bytes. # TYPE node_memory_Inactive_file_bytes gauge node_memory_Inactive_file_bytes 4.586479616e+09 # HELP node_memory_KReclaimable_bytes Memory information field KReclaimable_bytes. # TYPE node_memory_KReclaimable_bytes gauge node_memory_KReclaimable_bytes 7.0531072e+08 # HELP node_memory_KernelStack_bytes Memory information field KernelStack_bytes. # TYPE node_memory_KernelStack_bytes gauge node_memory_KernelStack_bytes 9.814016e+06 # HELP node_memory_Mapped_bytes Memory information field Mapped_bytes. # TYPE node_memory_Mapped_bytes gauge node_memory_Mapped_bytes 6.47979008e+08 # HELP node_memory_MemAvailable_bytes Memory information field MemAvailable_bytes. # TYPE node_memory_MemAvailable_bytes gauge node_memory_MemAvailable_bytes 6.299774976e+09 # HELP node_memory_MemFree_bytes Memory information field MemFree_bytes. # TYPE node_memory_MemFree_bytes gauge node_memory_MemFree_bytes 4.34405376e+08 # HELP node_memory_MemTotal_bytes Memory information field MemTotal_bytes. # TYPE node_memory_MemTotal_bytes gauge node_memory_MemTotal_bytes 8.326467584e+09 # HELP node_memory_Mlocked_bytes Memory information field Mlocked_bytes. # TYPE node_memory_Mlocked_bytes gauge node_memory_Mlocked_bytes 1.1218944e+07 # HELP node_memory_NFS_Unstable_bytes Memory information field NFS_Unstable_bytes. # TYPE node_memory_NFS_Unstable_bytes gauge node_memory_NFS_Unstable_bytes 0 # HELP node_memory_PageTables_bytes Memory information field PageTables_bytes. # TYPE node_memory_PageTables_bytes gauge node_memory_PageTables_bytes 2.070528e+07 # HELP node_memory_Percpu_bytes Memory information field Percpu_bytes. # TYPE node_memory_Percpu_bytes gauge node_memory_Percpu_bytes 3.244032e+06 # HELP node_memory_SReclaimable_bytes Memory information field SReclaimable_bytes. # TYPE node_memory_SReclaimable_bytes gauge node_memory_SReclaimable_bytes 7.0531072e+08 # HELP node_memory_SUnreclaim_bytes Memory information field SUnreclaim_bytes. # TYPE node_memory_SUnreclaim_bytes gauge node_memory_SUnreclaim_bytes 1.65195776e+08 # HELP node_memory_SecPageTables_bytes Memory information field SecPageTables_bytes. # TYPE node_memory_SecPageTables_bytes gauge node_memory_SecPageTables_bytes 0 # HELP node_memory_ShmemHugePages_bytes Memory information field ShmemHugePages_bytes. # TYPE node_memory_ShmemHugePages_bytes gauge node_memory_ShmemHugePages_bytes 0 # HELP node_memory_ShmemPmdMapped_bytes Memory information field ShmemPmdMapped_bytes. # TYPE node_memory_ShmemPmdMapped_bytes gauge node_memory_ShmemPmdMapped_bytes 0 # HELP node_memory_Shmem_bytes Memory information field Shmem_bytes. # TYPE node_memory_Shmem_bytes gauge node_memory_Shmem_bytes 1.425408e+07 # HELP node_memory_Slab_bytes Memory information field Slab_bytes. # TYPE node_memory_Slab_bytes gauge node_memory_Slab_bytes 8.70506496e+08 # HELP node_memory_SwapCached_bytes Memory information field SwapCached_bytes. # TYPE node_memory_SwapCached_bytes gauge node_memory_SwapCached_bytes 0 # HELP node_memory_SwapFree_bytes Memory information field SwapFree_bytes. # TYPE node_memory_SwapFree_bytes gauge node_memory_SwapFree_bytes 0 # HELP node_memory_SwapTotal_bytes Memory information field SwapTotal_bytes. # TYPE node_memory_SwapTotal_bytes gauge node_memory_SwapTotal_bytes 0 # HELP node_memory_Unevictable_bytes Memory information field Unevictable_bytes. 
# TYPE node_memory_Unevictable_bytes gauge node_memory_Unevictable_bytes 1.1218944e+07 # HELP node_memory_VmallocChunk_bytes Memory information field VmallocChunk_bytes. # TYPE node_memory_VmallocChunk_bytes gauge node_memory_VmallocChunk_bytes 0 # HELP node_memory_VmallocTotal_bytes Memory information field VmallocTotal_bytes. # TYPE node_memory_VmallocTotal_bytes gauge node_memory_VmallocTotal_bytes 3.5184372087808e+13 # HELP node_memory_VmallocUsed_bytes Memory information field VmallocUsed_bytes. # TYPE node_memory_VmallocUsed_bytes gauge node_memory_VmallocUsed_bytes 7.6791808e+07 # HELP node_memory_WritebackTmp_bytes Memory information field WritebackTmp_bytes. # TYPE node_memory_WritebackTmp_bytes gauge node_memory_WritebackTmp_bytes 0 # HELP node_memory_Writeback_bytes Memory information field Writeback_bytes. # TYPE node_memory_Writeback_bytes gauge node_memory_Writeback_bytes 0 # HELP node_memory_Zswap_bytes Memory information field Zswap_bytes. # TYPE node_memory_Zswap_bytes gauge node_memory_Zswap_bytes 0 # HELP node_memory_Zswapped_bytes Memory information field Zswapped_bytes. # TYPE node_memory_Zswapped_bytes gauge node_memory_Zswapped_bytes 0 # HELP node_netstat_Icmp6_InErrors Statistic Icmp6InErrors. # TYPE node_netstat_Icmp6_InErrors untyped node_netstat_Icmp6_InErrors 0 # HELP node_netstat_Icmp6_InMsgs Statistic Icmp6InMsgs. # TYPE node_netstat_Icmp6_InMsgs untyped node_netstat_Icmp6_InMsgs 0 # HELP node_netstat_Icmp6_OutMsgs Statistic Icmp6OutMsgs. # TYPE node_netstat_Icmp6_OutMsgs untyped node_netstat_Icmp6_OutMsgs 1289 # HELP node_netstat_Icmp_InErrors Statistic IcmpInErrors. # TYPE node_netstat_Icmp_InErrors untyped node_netstat_Icmp_InErrors 0 # HELP node_netstat_Icmp_InMsgs Statistic IcmpInMsgs. # TYPE node_netstat_Icmp_InMsgs untyped node_netstat_Icmp_InMsgs 0 # HELP node_netstat_Icmp_OutMsgs Statistic IcmpOutMsgs. # TYPE node_netstat_Icmp_OutMsgs untyped node_netstat_Icmp_OutMsgs 0 # HELP node_netstat_Ip6_InOctets Statistic Ip6InOctets. # TYPE node_netstat_Ip6_InOctets untyped node_netstat_Ip6_InOctets 72468 # HELP node_netstat_Ip6_OutOctets Statistic Ip6OutOctets. # TYPE node_netstat_Ip6_OutOctets untyped node_netstat_Ip6_OutOctets 72280 # HELP node_netstat_IpExt_InOctets Statistic IpExtInOctets. # TYPE node_netstat_IpExt_InOctets untyped node_netstat_IpExt_InOctets 1.1589068e+07 # HELP node_netstat_IpExt_OutOctets Statistic IpExtOutOctets. # TYPE node_netstat_IpExt_OutOctets untyped node_netstat_IpExt_OutOctets 1.22636219e+08 # HELP node_netstat_Ip_Forwarding Statistic IpForwarding. # TYPE node_netstat_Ip_Forwarding untyped node_netstat_Ip_Forwarding 1 # HELP node_netstat_TcpExt_ListenDrops Statistic TcpExtListenDrops. # TYPE node_netstat_TcpExt_ListenDrops untyped node_netstat_TcpExt_ListenDrops 0 # HELP node_netstat_TcpExt_ListenOverflows Statistic TcpExtListenOverflows. # TYPE node_netstat_TcpExt_ListenOverflows untyped node_netstat_TcpExt_ListenOverflows 0 # HELP node_netstat_TcpExt_SyncookiesFailed Statistic TcpExtSyncookiesFailed. # TYPE node_netstat_TcpExt_SyncookiesFailed untyped node_netstat_TcpExt_SyncookiesFailed 0 # HELP node_netstat_TcpExt_SyncookiesRecv Statistic TcpExtSyncookiesRecv. # TYPE node_netstat_TcpExt_SyncookiesRecv untyped node_netstat_TcpExt_SyncookiesRecv 0 # HELP node_netstat_TcpExt_SyncookiesSent Statistic TcpExtSyncookiesSent. # TYPE node_netstat_TcpExt_SyncookiesSent untyped node_netstat_TcpExt_SyncookiesSent 0 # HELP node_netstat_TcpExt_TCPSynRetrans Statistic TcpExtTCPSynRetrans. 
# TYPE node_netstat_TcpExt_TCPSynRetrans untyped node_netstat_TcpExt_TCPSynRetrans 2 # HELP node_netstat_TcpExt_TCPTimeouts Statistic TcpExtTCPTimeouts. # TYPE node_netstat_TcpExt_TCPTimeouts untyped node_netstat_TcpExt_TCPTimeouts 0 # HELP node_netstat_Tcp_ActiveOpens Statistic TcpActiveOpens. # TYPE node_netstat_Tcp_ActiveOpens untyped node_netstat_Tcp_ActiveOpens 0 # HELP node_netstat_Tcp_CurrEstab Statistic TcpCurrEstab. # TYPE node_netstat_Tcp_CurrEstab untyped node_netstat_Tcp_CurrEstab 1 # HELP node_netstat_Tcp_InErrs Statistic TcpInErrs. # TYPE node_netstat_Tcp_InErrs untyped node_netstat_Tcp_InErrs 0 # HELP node_netstat_Tcp_InSegs Statistic TcpInSegs. # TYPE node_netstat_Tcp_InSegs untyped node_netstat_Tcp_InSegs 109945 # HELP node_netstat_Tcp_OutRsts Statistic TcpOutRsts. # TYPE node_netstat_Tcp_OutRsts untyped node_netstat_Tcp_OutRsts 0 # HELP node_netstat_Tcp_OutSegs Statistic TcpOutSegs. # TYPE node_netstat_Tcp_OutSegs untyped node_netstat_Tcp_OutSegs 142367 # HELP node_netstat_Tcp_PassiveOpens Statistic TcpPassiveOpens. # TYPE node_netstat_Tcp_PassiveOpens untyped node_netstat_Tcp_PassiveOpens 8691 # HELP node_netstat_Tcp_RetransSegs Statistic TcpRetransSegs. # TYPE node_netstat_Tcp_RetransSegs untyped node_netstat_Tcp_RetransSegs 67 # HELP node_netstat_Udp6_InDatagrams Statistic Udp6InDatagrams. # TYPE node_netstat_Udp6_InDatagrams untyped node_netstat_Udp6_InDatagrams 0 # HELP node_netstat_Udp6_InErrors Statistic Udp6InErrors. # TYPE node_netstat_Udp6_InErrors untyped node_netstat_Udp6_InErrors 0 # HELP node_netstat_Udp6_NoPorts Statistic Udp6NoPorts. # TYPE node_netstat_Udp6_NoPorts untyped node_netstat_Udp6_NoPorts 0 # HELP node_netstat_Udp6_OutDatagrams Statistic Udp6OutDatagrams. # TYPE node_netstat_Udp6_OutDatagrams untyped node_netstat_Udp6_OutDatagrams 0 # HELP node_netstat_Udp6_RcvbufErrors Statistic Udp6RcvbufErrors. # TYPE node_netstat_Udp6_RcvbufErrors untyped node_netstat_Udp6_RcvbufErrors 0 # HELP node_netstat_Udp6_SndbufErrors Statistic Udp6SndbufErrors. # TYPE node_netstat_Udp6_SndbufErrors untyped node_netstat_Udp6_SndbufErrors 0 # HELP node_netstat_UdpLite6_InErrors Statistic UdpLite6InErrors. # TYPE node_netstat_UdpLite6_InErrors untyped node_netstat_UdpLite6_InErrors 0 # HELP node_netstat_UdpLite_InErrors Statistic UdpLiteInErrors. # TYPE node_netstat_UdpLite_InErrors untyped node_netstat_UdpLite_InErrors 0 # HELP node_netstat_Udp_InDatagrams Statistic UdpInDatagrams. # TYPE node_netstat_Udp_InDatagrams untyped node_netstat_Udp_InDatagrams 0 # HELP node_netstat_Udp_InErrors Statistic UdpInErrors. # TYPE node_netstat_Udp_InErrors untyped node_netstat_Udp_InErrors 0 # HELP node_netstat_Udp_NoPorts Statistic UdpNoPorts. # TYPE node_netstat_Udp_NoPorts untyped node_netstat_Udp_NoPorts 0 # HELP node_netstat_Udp_OutDatagrams Statistic UdpOutDatagrams. # TYPE node_netstat_Udp_OutDatagrams untyped node_netstat_Udp_OutDatagrams 0 # HELP node_netstat_Udp_RcvbufErrors Statistic UdpRcvbufErrors. # TYPE node_netstat_Udp_RcvbufErrors untyped node_netstat_Udp_RcvbufErrors 0 # HELP node_netstat_Udp_SndbufErrors Statistic UdpSndbufErrors. 
# TYPE node_netstat_Udp_SndbufErrors untyped node_netstat_Udp_SndbufErrors 0 # HELP node_network_address_assign_type Network device property: address_assign_type # TYPE node_network_address_assign_type gauge node_network_address_assign_type{device="cilium_host"} 3 node_network_address_assign_type{device="cilium_net"} 3 node_network_address_assign_type{device="cpbridge"} 3 node_network_address_assign_type{device="docker0"} 3 node_network_address_assign_type{device="eth0"} 0 node_network_address_assign_type{device="eth1"} 0 node_network_address_assign_type{device="lo"} 0 node_network_address_assign_type{device="lxc2cc26275f9a8"} 3 node_network_address_assign_type{device="lxc31d91d2beeb5"} 3 node_network_address_assign_type{device="lxc3b22b4e56c86"} 3 node_network_address_assign_type{device="lxc3c50787bf90e"} 3 node_network_address_assign_type{device="lxc74f1b1c377df"} 3 node_network_address_assign_type{device="lxc835988c36bd1"} 3 node_network_address_assign_type{device="lxc8e409c75e910"} 3 node_network_address_assign_type{device="lxc_health"} 3 node_network_address_assign_type{device="lxce6b22a74964a"} 3 node_network_address_assign_type{device="lxce7e05c74bd90"} 3 node_network_address_assign_type{device="lxcf2de09b2e8c8"} 3 node_network_address_assign_type{device="lxcf80e6c86604d"} 3 # HELP node_network_carrier Network device property: carrier # TYPE node_network_carrier gauge node_network_carrier{device="cilium_host"} 1 node_network_carrier{device="cilium_net"} 1 node_network_carrier{device="cpbridge"} 1 node_network_carrier{device="docker0"} 0 node_network_carrier{device="eth0"} 1 node_network_carrier{device="eth1"} 1 node_network_carrier{device="lo"} 1 node_network_carrier{device="lxc2cc26275f9a8"} 1 node_network_carrier{device="lxc31d91d2beeb5"} 1 node_network_carrier{device="lxc3b22b4e56c86"} 1 node_network_carrier{device="lxc3c50787bf90e"} 1 node_network_carrier{device="lxc74f1b1c377df"} 1 node_network_carrier{device="lxc835988c36bd1"} 1 node_network_carrier{device="lxc8e409c75e910"} 1 node_network_carrier{device="lxc_health"} 1 node_network_carrier{device="lxce6b22a74964a"} 1 node_network_carrier{device="lxce7e05c74bd90"} 1 node_network_carrier{device="lxcf2de09b2e8c8"} 1 node_network_carrier{device="lxcf80e6c86604d"} 1 # HELP node_network_carrier_changes_total Network device property: carrier_changes_total # TYPE node_network_carrier_changes_total counter node_network_carrier_changes_total{device="cilium_host"} 2 node_network_carrier_changes_total{device="cilium_net"} 2 node_network_carrier_changes_total{device="cpbridge"} 0 node_network_carrier_changes_total{device="docker0"} 1 node_network_carrier_changes_total{device="eth0"} 2 node_network_carrier_changes_total{device="eth1"} 2 node_network_carrier_changes_total{device="lo"} 0 node_network_carrier_changes_total{device="lxc2cc26275f9a8"} 2 node_network_carrier_changes_total{device="lxc31d91d2beeb5"} 2 node_network_carrier_changes_total{device="lxc3b22b4e56c86"} 2 node_network_carrier_changes_total{device="lxc3c50787bf90e"} 2 node_network_carrier_changes_total{device="lxc74f1b1c377df"} 2 node_network_carrier_changes_total{device="lxc835988c36bd1"} 2 node_network_carrier_changes_total{device="lxc8e409c75e910"} 2 node_network_carrier_changes_total{device="lxc_health"} 2 node_network_carrier_changes_total{device="lxce6b22a74964a"} 2 node_network_carrier_changes_total{device="lxce7e05c74bd90"} 2 node_network_carrier_changes_total{device="lxcf2de09b2e8c8"} 2 node_network_carrier_changes_total{device="lxcf80e6c86604d"} 2 # HELP 
node_network_carrier_down_changes_total Network device property: carrier_down_changes_total # TYPE node_network_carrier_down_changes_total counter node_network_carrier_down_changes_total{device="cilium_host"} 1 node_network_carrier_down_changes_total{device="cilium_net"} 1 node_network_carrier_down_changes_total{device="cpbridge"} 0 node_network_carrier_down_changes_total{device="docker0"} 1 node_network_carrier_down_changes_total{device="eth0"} 1 node_network_carrier_down_changes_total{device="eth1"} 1 node_network_carrier_down_changes_total{device="lo"} 0 node_network_carrier_down_changes_total{device="lxc2cc26275f9a8"} 1 node_network_carrier_down_changes_total{device="lxc31d91d2beeb5"} 1 node_network_carrier_down_changes_total{device="lxc3b22b4e56c86"} 1 node_network_carrier_down_changes_total{device="lxc3c50787bf90e"} 1 node_network_carrier_down_changes_total{device="lxc74f1b1c377df"} 1 node_network_carrier_down_changes_total{device="lxc835988c36bd1"} 1 node_network_carrier_down_changes_total{device="lxc8e409c75e910"} 1 node_network_carrier_down_changes_total{device="lxc_health"} 1 node_network_carrier_down_changes_total{device="lxce6b22a74964a"} 1 node_network_carrier_down_changes_total{device="lxce7e05c74bd90"} 1 node_network_carrier_down_changes_total{device="lxcf2de09b2e8c8"} 1 node_network_carrier_down_changes_total{device="lxcf80e6c86604d"} 1 # HELP node_network_carrier_up_changes_total Network device property: carrier_up_changes_total # TYPE node_network_carrier_up_changes_total counter node_network_carrier_up_changes_total{device="cilium_host"} 1 node_network_carrier_up_changes_total{device="cilium_net"} 1 node_network_carrier_up_changes_total{device="cpbridge"} 0 node_network_carrier_up_changes_total{device="docker0"} 0 node_network_carrier_up_changes_total{device="eth0"} 1 node_network_carrier_up_changes_total{device="eth1"} 1 node_network_carrier_up_changes_total{device="lo"} 0 node_network_carrier_up_changes_total{device="lxc2cc26275f9a8"} 1 node_network_carrier_up_changes_total{device="lxc31d91d2beeb5"} 1 node_network_carrier_up_changes_total{device="lxc3b22b4e56c86"} 1 node_network_carrier_up_changes_total{device="lxc3c50787bf90e"} 1 node_network_carrier_up_changes_total{device="lxc74f1b1c377df"} 1 node_network_carrier_up_changes_total{device="lxc835988c36bd1"} 1 node_network_carrier_up_changes_total{device="lxc8e409c75e910"} 1 node_network_carrier_up_changes_total{device="lxc_health"} 1 node_network_carrier_up_changes_total{device="lxce6b22a74964a"} 1 node_network_carrier_up_changes_total{device="lxce7e05c74bd90"} 1 node_network_carrier_up_changes_total{device="lxcf2de09b2e8c8"} 1 node_network_carrier_up_changes_total{device="lxcf80e6c86604d"} 1 # HELP node_network_device_id Network device property: device_id # TYPE node_network_device_id gauge node_network_device_id{device="cilium_host"} 0 node_network_device_id{device="cilium_net"} 0 node_network_device_id{device="cpbridge"} 0 node_network_device_id{device="docker0"} 0 node_network_device_id{device="eth0"} 0 node_network_device_id{device="eth1"} 0 node_network_device_id{device="lo"} 0 node_network_device_id{device="lxc2cc26275f9a8"} 0 node_network_device_id{device="lxc31d91d2beeb5"} 0 node_network_device_id{device="lxc3b22b4e56c86"} 0 node_network_device_id{device="lxc3c50787bf90e"} 0 node_network_device_id{device="lxc74f1b1c377df"} 0 node_network_device_id{device="lxc835988c36bd1"} 0 node_network_device_id{device="lxc8e409c75e910"} 0 node_network_device_id{device="lxc_health"} 0 
node_network_device_id{device="lxce6b22a74964a"} 0 node_network_device_id{device="lxce7e05c74bd90"} 0 node_network_device_id{device="lxcf2de09b2e8c8"} 0 node_network_device_id{device="lxcf80e6c86604d"} 0 # HELP node_network_dormant Network device property: dormant # TYPE node_network_dormant gauge node_network_dormant{device="cilium_host"} 0 node_network_dormant{device="cilium_net"} 0 node_network_dormant{device="cpbridge"} 0 node_network_dormant{device="docker0"} 0 node_network_dormant{device="eth0"} 0 node_network_dormant{device="eth1"} 0 node_network_dormant{device="lo"} 0 node_network_dormant{device="lxc2cc26275f9a8"} 0 node_network_dormant{device="lxc31d91d2beeb5"} 0 node_network_dormant{device="lxc3b22b4e56c86"} 0 node_network_dormant{device="lxc3c50787bf90e"} 0 node_network_dormant{device="lxc74f1b1c377df"} 0 node_network_dormant{device="lxc835988c36bd1"} 0 node_network_dormant{device="lxc8e409c75e910"} 0 node_network_dormant{device="lxc_health"} 0 node_network_dormant{device="lxce6b22a74964a"} 0 node_network_dormant{device="lxce7e05c74bd90"} 0 node_network_dormant{device="lxcf2de09b2e8c8"} 0 node_network_dormant{device="lxcf80e6c86604d"} 0 # HELP node_network_flags Network device property: flags # TYPE node_network_flags gauge node_network_flags{device="cilium_host"} 4227 node_network_flags{device="cilium_net"} 4227 node_network_flags{device="cpbridge"} 131 node_network_flags{device="docker0"} 4099 node_network_flags{device="eth0"} 4099 node_network_flags{device="eth1"} 4099 node_network_flags{device="lo"} 9 node_network_flags{device="lxc2cc26275f9a8"} 4099 node_network_flags{device="lxc31d91d2beeb5"} 4099 node_network_flags{device="lxc3b22b4e56c86"} 4099 node_network_flags{device="lxc3c50787bf90e"} 4099 node_network_flags{device="lxc74f1b1c377df"} 4099 node_network_flags{device="lxc835988c36bd1"} 4099 node_network_flags{device="lxc8e409c75e910"} 4099 node_network_flags{device="lxc_health"} 4099 node_network_flags{device="lxce6b22a74964a"} 4099 node_network_flags{device="lxce7e05c74bd90"} 4099 node_network_flags{device="lxcf2de09b2e8c8"} 4099 node_network_flags{device="lxcf80e6c86604d"} 4099 # HELP node_network_iface_id Network device property: iface_id # TYPE node_network_iface_id gauge node_network_iface_id{device="cilium_host"} 7 node_network_iface_id{device="cilium_net"} 6 node_network_iface_id{device="cpbridge"} 5 node_network_iface_id{device="docker0"} 4 node_network_iface_id{device="eth0"} 2 node_network_iface_id{device="eth1"} 3 node_network_iface_id{device="lo"} 1 node_network_iface_id{device="lxc2cc26275f9a8"} 13 node_network_iface_id{device="lxc31d91d2beeb5"} 15 node_network_iface_id{device="lxc3b22b4e56c86"} 59 node_network_iface_id{device="lxc3c50787bf90e"} 19 node_network_iface_id{device="lxc74f1b1c377df"} 57 node_network_iface_id{device="lxc835988c36bd1"} 71 node_network_iface_id{device="lxc8e409c75e910"} 11 node_network_iface_id{device="lxc_health"} 9 node_network_iface_id{device="lxce6b22a74964a"} 75 node_network_iface_id{device="lxce7e05c74bd90"} 73 node_network_iface_id{device="lxcf2de09b2e8c8"} 21 node_network_iface_id{device="lxcf80e6c86604d"} 77 # HELP node_network_iface_link Network device property: iface_link # TYPE node_network_iface_link gauge node_network_iface_link{device="cilium_host"} 6 node_network_iface_link{device="cilium_net"} 7 node_network_iface_link{device="cpbridge"} 5 node_network_iface_link{device="docker0"} 4 node_network_iface_link{device="eth0"} 2 node_network_iface_link{device="eth1"} 3 node_network_iface_link{device="lo"} 1 
node_network_iface_link{device="lxc2cc26275f9a8"} 12 node_network_iface_link{device="lxc31d91d2beeb5"} 14 node_network_iface_link{device="lxc3b22b4e56c86"} 58 node_network_iface_link{device="lxc3c50787bf90e"} 18 node_network_iface_link{device="lxc74f1b1c377df"} 56 node_network_iface_link{device="lxc835988c36bd1"} 70 node_network_iface_link{device="lxc8e409c75e910"} 10 node_network_iface_link{device="lxc_health"} 8 node_network_iface_link{device="lxce6b22a74964a"} 74 node_network_iface_link{device="lxce7e05c74bd90"} 72 node_network_iface_link{device="lxcf2de09b2e8c8"} 20 node_network_iface_link{device="lxcf80e6c86604d"} 76 # HELP node_network_iface_link_mode Network device property: iface_link_mode # TYPE node_network_iface_link_mode gauge node_network_iface_link_mode{device="cilium_host"} 0 node_network_iface_link_mode{device="cilium_net"} 0 node_network_iface_link_mode{device="cpbridge"} 0 node_network_iface_link_mode{device="docker0"} 0 node_network_iface_link_mode{device="eth0"} 0 node_network_iface_link_mode{device="eth1"} 0 node_network_iface_link_mode{device="lo"} 0 node_network_iface_link_mode{device="lxc2cc26275f9a8"} 0 node_network_iface_link_mode{device="lxc31d91d2beeb5"} 0 node_network_iface_link_mode{device="lxc3b22b4e56c86"} 0 node_network_iface_link_mode{device="lxc3c50787bf90e"} 0 node_network_iface_link_mode{device="lxc74f1b1c377df"} 0 node_network_iface_link_mode{device="lxc835988c36bd1"} 0 node_network_iface_link_mode{device="lxc8e409c75e910"} 0 node_network_iface_link_mode{device="lxc_health"} 0 node_network_iface_link_mode{device="lxce6b22a74964a"} 0 node_network_iface_link_mode{device="lxce7e05c74bd90"} 0 node_network_iface_link_mode{device="lxcf2de09b2e8c8"} 0 node_network_iface_link_mode{device="lxcf80e6c86604d"} 0 # HELP node_network_info Non-numeric data from /sys/class/net/, value is always 1. 
# TYPE node_network_info gauge node_network_info{address="00:00:00:00:00:00",adminstate="up",broadcast="00:00:00:00:00:00",device="lo",duplex="",ifalias="",operstate="unknown"} 1 node_network_info{address="02:42:18:e3:ac:72",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="docker0",duplex="unknown",ifalias="",operstate="down"} 1 node_network_info{address="02:e8:86:54:f9:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxce6b22a74964a",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="0a:7d:ab:a9:6b:5f",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="cpbridge",duplex="",ifalias="",operstate="unknown"} 1 node_network_info{address="0e:24:70:69:6e:9b",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxc8e409c75e910",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="26:4d:ca:2b:0e:64",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxc2cc26275f9a8",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="46:50:da:ab:6e:5d",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxc3c50787bf90e",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="52:f0:36:a9:54:4b",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxc74f1b1c377df",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="5a:cd:19:df:8e:32",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxc31d91d2beeb5",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="7e:8e:98:84:b3:02",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxc_health",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="92:4b:7b:5c:1c:97",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="unknown",ifalias="",operstate="up"} 1 node_network_info{address="b6:89:d5:95:5b:98",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="eth1",duplex="unknown",ifalias="",operstate="up"} 1 node_network_info{address="be:ec:94:e9:12:d6",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="cilium_host",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="c2:a9:43:b5:75:1a",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxc835988c36bd1",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="c6:0a:d8:9a:4a:18",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxcf2de09b2e8c8",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="da:44:e8:00:68:4c",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxce7e05c74bd90",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="de:36:bc:b8:14:47",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="cilium_net",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="f6:71:56:ba:13:d9",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxc3b22b4e56c86",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="f6:9e:14:e2:28:f5",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="lxcf80e6c86604d",duplex="full",ifalias="",operstate="up"} 1 # HELP node_network_mtu_bytes Network device property: mtu_bytes # TYPE node_network_mtu_bytes gauge node_network_mtu_bytes{device="cilium_host"} 1500 node_network_mtu_bytes{device="cilium_net"} 1500 node_network_mtu_bytes{device="cpbridge"} 1500 node_network_mtu_bytes{device="docker0"} 1500 node_network_mtu_bytes{device="eth0"} 1500 node_network_mtu_bytes{device="eth1"} 1500 node_network_mtu_bytes{device="lo"} 65536 node_network_mtu_bytes{device="lxc2cc26275f9a8"} 1500 node_network_mtu_bytes{device="lxc31d91d2beeb5"} 1500 
node_network_mtu_bytes{device="lxc3b22b4e56c86"} 1500
node_network_mtu_bytes{device="lxc3c50787bf90e"} 1500
node_network_mtu_bytes{device="lxc74f1b1c377df"} 1500
node_network_mtu_bytes{device="lxc835988c36bd1"} 1500
node_network_mtu_bytes{device="lxc8e409c75e910"} 1500
node_network_mtu_bytes{device="lxc_health"} 1500
node_network_mtu_bytes{device="lxce6b22a74964a"} 1500
node_network_mtu_bytes{device="lxce7e05c74bd90"} 1500
node_network_mtu_bytes{device="lxcf2de09b2e8c8"} 1500
node_network_mtu_bytes{device="lxcf80e6c86604d"} 1500
# HELP node_network_name_assign_type Network device property: name_assign_type
# TYPE node_network_name_assign_type gauge
node_network_name_assign_type{device="cilium_host"} 3
node_network_name_assign_type{device="cilium_net"} 3
node_network_name_assign_type{device="cpbridge"} 3
node_network_name_assign_type{device="docker0"} 3
node_network_name_assign_type{device="eth0"} 1
node_network_name_assign_type{device="eth1"} 1
node_network_name_assign_type{device="lo"} 2
node_network_name_assign_type{device="lxc2cc26275f9a8"} 3
node_network_name_assign_type{device="lxc31d91d2beeb5"} 3
node_network_name_assign_type{device="lxc3b22b4e56c86"} 3
node_network_name_assign_type{device="lxc3c50787bf90e"} 3
node_network_name_assign_type{device="lxc74f1b1c377df"} 3
node_network_name_assign_type{device="lxc835988c36bd1"} 3
node_network_name_assign_type{device="lxc8e409c75e910"} 3
node_network_name_assign_type{device="lxc_health"} 3
node_network_name_assign_type{device="lxce6b22a74964a"} 3
node_network_name_assign_type{device="lxce7e05c74bd90"} 3
node_network_name_assign_type{device="lxcf2de09b2e8c8"} 3
node_network_name_assign_type{device="lxcf80e6c86604d"} 3
# HELP node_network_net_dev_group Network device property: net_dev_group
# TYPE node_network_net_dev_group gauge
node_network_net_dev_group{device="cilium_host"} 0
node_network_net_dev_group{device="cilium_net"} 0
node_network_net_dev_group{device="cpbridge"} 0
node_network_net_dev_group{device="docker0"} 0
node_network_net_dev_group{device="eth0"} 0
node_network_net_dev_group{device="eth1"} 0
node_network_net_dev_group{device="lo"} 0
node_network_net_dev_group{device="lxc2cc26275f9a8"} 0
node_network_net_dev_group{device="lxc31d91d2beeb5"} 0
node_network_net_dev_group{device="lxc3b22b4e56c86"} 0
node_network_net_dev_group{device="lxc3c50787bf90e"} 0
node_network_net_dev_group{device="lxc74f1b1c377df"} 0
node_network_net_dev_group{device="lxc835988c36bd1"} 0
node_network_net_dev_group{device="lxc8e409c75e910"} 0
node_network_net_dev_group{device="lxc_health"} 0
node_network_net_dev_group{device="lxce6b22a74964a"} 0
node_network_net_dev_group{device="lxce7e05c74bd90"} 0
node_network_net_dev_group{device="lxcf2de09b2e8c8"} 0
node_network_net_dev_group{device="lxcf80e6c86604d"} 0
# HELP node_network_protocol_type Network device property: protocol_type
# TYPE node_network_protocol_type gauge
node_network_protocol_type{device="cilium_host"} 1
node_network_protocol_type{device="cilium_net"} 1
node_network_protocol_type{device="cpbridge"} 1
node_network_protocol_type{device="docker0"} 1
node_network_protocol_type{device="eth0"} 1
node_network_protocol_type{device="eth1"} 1
node_network_protocol_type{device="lo"} 772
node_network_protocol_type{device="lxc2cc26275f9a8"} 1
node_network_protocol_type{device="lxc31d91d2beeb5"} 1
node_network_protocol_type{device="lxc3b22b4e56c86"} 1
node_network_protocol_type{device="lxc3c50787bf90e"} 1
node_network_protocol_type{device="lxc74f1b1c377df"} 1
node_network_protocol_type{device="lxc835988c36bd1"} 1
node_network_protocol_type{device="lxc8e409c75e910"} 1
node_network_protocol_type{device="lxc_health"} 1
node_network_protocol_type{device="lxce6b22a74964a"} 1
node_network_protocol_type{device="lxce7e05c74bd90"} 1
node_network_protocol_type{device="lxcf2de09b2e8c8"} 1
node_network_protocol_type{device="lxcf80e6c86604d"} 1
# HELP node_network_receive_bytes_total Network device statistic receive_bytes.
# TYPE node_network_receive_bytes_total counter
node_network_receive_bytes_total{device="eth0"} 1.391653e+07
node_network_receive_bytes_total{device="lo"} 0
# HELP node_network_receive_compressed_total Network device statistic receive_compressed.
# TYPE node_network_receive_compressed_total counter
node_network_receive_compressed_total{device="eth0"} 0
node_network_receive_compressed_total{device="lo"} 0
# HELP node_network_receive_drop_total Network device statistic receive_drop.
# TYPE node_network_receive_drop_total counter
node_network_receive_drop_total{device="eth0"} 0
node_network_receive_drop_total{device="lo"} 0
# HELP node_network_receive_errs_total Network device statistic receive_errs.
# TYPE node_network_receive_errs_total counter
node_network_receive_errs_total{device="eth0"} 0
node_network_receive_errs_total{device="lo"} 0
# HELP node_network_receive_fifo_total Network device statistic receive_fifo.
# TYPE node_network_receive_fifo_total counter
node_network_receive_fifo_total{device="eth0"} 0
node_network_receive_fifo_total{device="lo"} 0
# HELP node_network_receive_frame_total Network device statistic receive_frame.
# TYPE node_network_receive_frame_total counter
node_network_receive_frame_total{device="eth0"} 0
node_network_receive_frame_total{device="lo"} 0
# HELP node_network_receive_multicast_total Network device statistic receive_multicast.
# TYPE node_network_receive_multicast_total counter
node_network_receive_multicast_total{device="eth0"} 0
node_network_receive_multicast_total{device="lo"} 0
# HELP node_network_receive_nohandler_total Network device statistic receive_nohandler.
# TYPE node_network_receive_nohandler_total counter
node_network_receive_nohandler_total{device="eth0"} 0
node_network_receive_nohandler_total{device="lo"} 0
# HELP node_network_receive_packets_total Network device statistic receive_packets.
# TYPE node_network_receive_packets_total counter
node_network_receive_packets_total{device="eth0"} 127847
node_network_receive_packets_total{device="lo"} 0
# HELP node_network_speed_bytes Network device property: speed_bytes
# TYPE node_network_speed_bytes gauge
node_network_speed_bytes{device="cilium_host"} 1.25e+09
node_network_speed_bytes{device="cilium_net"} 1.25e+09
node_network_speed_bytes{device="docker0"} -125000
node_network_speed_bytes{device="eth0"} -125000
node_network_speed_bytes{device="eth1"} -125000
node_network_speed_bytes{device="lxc2cc26275f9a8"} 1.25e+09
node_network_speed_bytes{device="lxc31d91d2beeb5"} 1.25e+09
node_network_speed_bytes{device="lxc3b22b4e56c86"} 1.25e+09
node_network_speed_bytes{device="lxc3c50787bf90e"} 1.25e+09
node_network_speed_bytes{device="lxc74f1b1c377df"} 1.25e+09
node_network_speed_bytes{device="lxc835988c36bd1"} 1.25e+09
node_network_speed_bytes{device="lxc8e409c75e910"} 1.25e+09
node_network_speed_bytes{device="lxc_health"} 1.25e+09
node_network_speed_bytes{device="lxce6b22a74964a"} 1.25e+09
node_network_speed_bytes{device="lxce7e05c74bd90"} 1.25e+09
node_network_speed_bytes{device="lxcf2de09b2e8c8"} 1.25e+09
node_network_speed_bytes{device="lxcf80e6c86604d"} 1.25e+09
# HELP node_network_transmit_bytes_total Network device statistic transmit_bytes.
# TYPE node_network_transmit_bytes_total counter
node_network_transmit_bytes_total{device="eth0"} 1.24717135e+08
node_network_transmit_bytes_total{device="lo"} 0
# HELP node_network_transmit_carrier_total Network device statistic transmit_carrier.
# TYPE node_network_transmit_carrier_total counter
node_network_transmit_carrier_total{device="eth0"} 0
node_network_transmit_carrier_total{device="lo"} 0
# HELP node_network_transmit_colls_total Network device statistic transmit_colls.
# TYPE node_network_transmit_colls_total counter
node_network_transmit_colls_total{device="eth0"} 0
node_network_transmit_colls_total{device="lo"} 0
# HELP node_network_transmit_compressed_total Network device statistic transmit_compressed.
# TYPE node_network_transmit_compressed_total counter
node_network_transmit_compressed_total{device="eth0"} 0
node_network_transmit_compressed_total{device="lo"} 0
# HELP node_network_transmit_drop_total Network device statistic transmit_drop.
# TYPE node_network_transmit_drop_total counter
node_network_transmit_drop_total{device="eth0"} 0
node_network_transmit_drop_total{device="lo"} 0
# HELP node_network_transmit_errs_total Network device statistic transmit_errs.
# TYPE node_network_transmit_errs_total counter
node_network_transmit_errs_total{device="eth0"} 0
node_network_transmit_errs_total{device="lo"} 0
# HELP node_network_transmit_fifo_total Network device statistic transmit_fifo.
# TYPE node_network_transmit_fifo_total counter
node_network_transmit_fifo_total{device="eth0"} 0
node_network_transmit_fifo_total{device="lo"} 0
# HELP node_network_transmit_packets_total Network device statistic transmit_packets.
# TYPE node_network_transmit_packets_total counter
node_network_transmit_packets_total{device="eth0"} 110250
node_network_transmit_packets_total{device="lo"} 0
# HELP node_network_transmit_queue_length Network device property: transmit_queue_length
# TYPE node_network_transmit_queue_length gauge
node_network_transmit_queue_length{device="cilium_host"} 1000
node_network_transmit_queue_length{device="cilium_net"} 1000
node_network_transmit_queue_length{device="cpbridge"} 1000
node_network_transmit_queue_length{device="docker0"} 0
node_network_transmit_queue_length{device="eth0"} 1000
node_network_transmit_queue_length{device="eth1"} 1000
node_network_transmit_queue_length{device="lo"} 1000
node_network_transmit_queue_length{device="lxc2cc26275f9a8"} 1000
node_network_transmit_queue_length{device="lxc31d91d2beeb5"} 1000
node_network_transmit_queue_length{device="lxc3b22b4e56c86"} 1000
node_network_transmit_queue_length{device="lxc3c50787bf90e"} 1000
node_network_transmit_queue_length{device="lxc74f1b1c377df"} 1000
node_network_transmit_queue_length{device="lxc835988c36bd1"} 1000
node_network_transmit_queue_length{device="lxc8e409c75e910"} 1000
node_network_transmit_queue_length{device="lxc_health"} 1000
node_network_transmit_queue_length{device="lxce6b22a74964a"} 1000
node_network_transmit_queue_length{device="lxce7e05c74bd90"} 1000
node_network_transmit_queue_length{device="lxcf2de09b2e8c8"} 1000
node_network_transmit_queue_length{device="lxcf80e6c86604d"} 1000
# HELP node_network_up Value is 1 if operstate is 'up', 0 otherwise.
# TYPE node_network_up gauge
node_network_up{device="cilium_host"} 1
node_network_up{device="cilium_net"} 1
node_network_up{device="cpbridge"} 0
node_network_up{device="docker0"} 0
node_network_up{device="eth0"} 1
node_network_up{device="eth1"} 1
node_network_up{device="lo"} 0
node_network_up{device="lxc2cc26275f9a8"} 1
node_network_up{device="lxc31d91d2beeb5"} 1
node_network_up{device="lxc3b22b4e56c86"} 1
node_network_up{device="lxc3c50787bf90e"} 1
node_network_up{device="lxc74f1b1c377df"} 1
node_network_up{device="lxc835988c36bd1"} 1
node_network_up{device="lxc8e409c75e910"} 1
node_network_up{device="lxc_health"} 1
node_network_up{device="lxce6b22a74964a"} 1
node_network_up{device="lxce7e05c74bd90"} 1
node_network_up{device="lxcf2de09b2e8c8"} 1
node_network_up{device="lxcf80e6c86604d"} 1
# HELP node_nf_conntrack_entries Number of currently allocated flow entries for connection tracking.
# TYPE node_nf_conntrack_entries gauge
node_nf_conntrack_entries 0
# HELP node_nf_conntrack_entries_limit Maximum size of connection tracking table.
# TYPE node_nf_conntrack_entries_limit gauge
node_nf_conntrack_entries_limit 131072
# HELP node_nf_conntrack_stat_drop Number of packets dropped due to conntrack failure.
# TYPE node_nf_conntrack_stat_drop gauge
node_nf_conntrack_stat_drop 0
# HELP node_nf_conntrack_stat_early_drop Number of dropped conntrack entries to make room for new ones, if maximum table size was reached.
# TYPE node_nf_conntrack_stat_early_drop gauge
node_nf_conntrack_stat_early_drop 0
# HELP node_nf_conntrack_stat_found Number of searched entries which were successful.
# TYPE node_nf_conntrack_stat_found gauge
node_nf_conntrack_stat_found 0
# HELP node_nf_conntrack_stat_ignore Number of packets seen which are already connected to a conntrack entry.
# TYPE node_nf_conntrack_stat_ignore gauge
node_nf_conntrack_stat_ignore 0
# HELP node_nf_conntrack_stat_insert Number of entries inserted into the list.
# TYPE node_nf_conntrack_stat_insert gauge
node_nf_conntrack_stat_insert 0
# HELP node_nf_conntrack_stat_insert_failed Number of entries for which list insertion was attempted but failed.
# TYPE node_nf_conntrack_stat_insert_failed gauge
node_nf_conntrack_stat_insert_failed 0
# HELP node_nf_conntrack_stat_invalid Number of packets seen which can not be tracked.
# TYPE node_nf_conntrack_stat_invalid gauge
node_nf_conntrack_stat_invalid 0
# HELP node_nf_conntrack_stat_search_restart Number of conntrack table lookups which had to be restarted due to hashtable resizes.
# TYPE node_nf_conntrack_stat_search_restart gauge
node_nf_conntrack_stat_search_restart 0
# HELP node_pressure_cpu_waiting_seconds_total Total time in seconds that processes have waited for CPU time
# TYPE node_pressure_cpu_waiting_seconds_total counter
node_pressure_cpu_waiting_seconds_total 102034.675726
# HELP node_pressure_io_stalled_seconds_total Total time in seconds no process could make progress due to IO congestion
# TYPE node_pressure_io_stalled_seconds_total counter
node_pressure_io_stalled_seconds_total 660.2650110000001
# HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion
# TYPE node_pressure_io_waiting_seconds_total counter
node_pressure_io_waiting_seconds_total 794.608443
# HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion
# TYPE node_pressure_memory_stalled_seconds_total counter
node_pressure_memory_stalled_seconds_total 0.31041199999999997
# HELP node_pressure_memory_waiting_seconds_total Total time in seconds that processes have waited for memory
# TYPE node_pressure_memory_waiting_seconds_total counter
node_pressure_memory_waiting_seconds_total 0.350031
# HELP node_procs_blocked Number of processes blocked waiting for I/O to complete.
# TYPE node_procs_blocked gauge
node_procs_blocked 0
# HELP node_procs_running Number of processes in runnable state.
# TYPE node_procs_running gauge
node_procs_running 1
# HELP node_schedstat_running_seconds_total Number of seconds CPU spent running a process.
# TYPE node_schedstat_running_seconds_total counter
node_schedstat_running_seconds_total{cpu="0"} 228295.933401884
node_schedstat_running_seconds_total{cpu="1"} 197583.565214817
node_schedstat_running_seconds_total{cpu="2"} 238883.963789352
node_schedstat_running_seconds_total{cpu="3"} 231718.604779134
# HELP node_schedstat_timeslices_total Number of timeslices executed by CPU.
# TYPE node_schedstat_timeslices_total counter
node_schedstat_timeslices_total{cpu="0"} 5.410682115e+09
node_schedstat_timeslices_total{cpu="1"} 3.424514386e+09
node_schedstat_timeslices_total{cpu="2"} 5.739419698e+09
node_schedstat_timeslices_total{cpu="3"} 5.47284189e+09
# HELP node_schedstat_waiting_seconds_total Number of seconds spent by processing waiting for this CPU.
# TYPE node_schedstat_waiting_seconds_total counter
node_schedstat_waiting_seconds_total{cpu="0"} 79147.596461789
node_schedstat_waiting_seconds_total{cpu="1"} 93052.840713826
node_schedstat_waiting_seconds_total{cpu="2"} 85627.611950696
node_schedstat_waiting_seconds_total{cpu="3"} 80555.707158323
# HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape.
# TYPE node_scrape_collector_duration_seconds gauge
node_scrape_collector_duration_seconds{collector="arp"} 6.6133e-05
node_scrape_collector_duration_seconds{collector="bcache"} 1.3776e-05
node_scrape_collector_duration_seconds{collector="bonding"} 1.6101e-05
node_scrape_collector_duration_seconds{collector="btrfs"} 0.000804119
node_scrape_collector_duration_seconds{collector="conntrack"} 0.000101252
node_scrape_collector_duration_seconds{collector="cpu"} 0.00040548
node_scrape_collector_duration_seconds{collector="cpufreq"} 7.58e-05
node_scrape_collector_duration_seconds{collector="diskstats"} 0.000248076
node_scrape_collector_duration_seconds{collector="dmi"} 1.552e-05
node_scrape_collector_duration_seconds{collector="edac"} 3.1034e-05
node_scrape_collector_duration_seconds{collector="entropy"} 8.2465e-05
node_scrape_collector_duration_seconds{collector="fibrechannel"} 7.955e-06
node_scrape_collector_duration_seconds{collector="filefd"} 3.1303e-05
node_scrape_collector_duration_seconds{collector="filesystem"} 0.001177586
node_scrape_collector_duration_seconds{collector="hwmon"} 1.8749e-05
node_scrape_collector_duration_seconds{collector="infiniband"} 1.0131e-05
node_scrape_collector_duration_seconds{collector="ipvs"} 1.8402e-05
node_scrape_collector_duration_seconds{collector="loadavg"} 3.8152e-05
node_scrape_collector_duration_seconds{collector="mdadm"} 1.9145e-05
node_scrape_collector_duration_seconds{collector="meminfo"} 0.000189835
node_scrape_collector_duration_seconds{collector="netclass"} 0.012278859
node_scrape_collector_duration_seconds{collector="netdev"} 0.000214151
node_scrape_collector_duration_seconds{collector="netstat"} 0.001277493
node_scrape_collector_duration_seconds{collector="nfs"} 4.5288e-05
node_scrape_collector_duration_seconds{collector="nfsd"} 2.3337e-05
node_scrape_collector_duration_seconds{collector="nvme"} 1.523e-05
node_scrape_collector_duration_seconds{collector="os"} 1.825e-05
node_scrape_collector_duration_seconds{collector="powersupplyclass"} 5.751e-05
node_scrape_collector_duration_seconds{collector="pressure"} 0.000111946
node_scrape_collector_duration_seconds{collector="rapl"} 2.1532e-05
node_scrape_collector_duration_seconds{collector="schedstat"} 5.8026e-05
node_scrape_collector_duration_seconds{collector="selinux"} 1.561e-06
node_scrape_collector_duration_seconds{collector="sockstat"} 0.000132228
node_scrape_collector_duration_seconds{collector="softnet"} 7.7756e-05
node_scrape_collector_duration_seconds{collector="stat"} 0.000188177
node_scrape_collector_duration_seconds{collector="tapestats"} 2.2012e-05
node_scrape_collector_duration_seconds{collector="textfile"} 2.1215e-05
node_scrape_collector_duration_seconds{collector="thermal_zone"} 0.000301897
node_scrape_collector_duration_seconds{collector="time"} 0.000190656
node_scrape_collector_duration_seconds{collector="timex"} 2.1885e-05
node_scrape_collector_duration_seconds{collector="udp_queues"} 0.000106584
node_scrape_collector_duration_seconds{collector="uname"} 8.834e-06
node_scrape_collector_duration_seconds{collector="vmstat"} 0.000160963
node_scrape_collector_duration_seconds{collector="xfs"} 1.3253e-05
node_scrape_collector_duration_seconds{collector="zfs"} 1.1049e-05
# HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge
node_scrape_collector_success{collector="arp"} 1
node_scrape_collector_success{collector="bcache"} 1
node_scrape_collector_success{collector="bonding"} 0
node_scrape_collector_success{collector="btrfs"} 1
node_scrape_collector_success{collector="conntrack"} 1
node_scrape_collector_success{collector="cpu"} 1
node_scrape_collector_success{collector="cpufreq"} 1
node_scrape_collector_success{collector="diskstats"} 1
node_scrape_collector_success{collector="dmi"} 1
node_scrape_collector_success{collector="edac"} 1
node_scrape_collector_success{collector="entropy"} 1
node_scrape_collector_success{collector="fibrechannel"} 0
node_scrape_collector_success{collector="filefd"} 1
node_scrape_collector_success{collector="filesystem"} 1
node_scrape_collector_success{collector="hwmon"} 1
node_scrape_collector_success{collector="infiniband"} 0
node_scrape_collector_success{collector="ipvs"} 0
node_scrape_collector_success{collector="loadavg"} 1
node_scrape_collector_success{collector="mdadm"} 0
node_scrape_collector_success{collector="meminfo"} 1
node_scrape_collector_success{collector="netclass"} 1
node_scrape_collector_success{collector="netdev"} 1
node_scrape_collector_success{collector="netstat"} 1
node_scrape_collector_success{collector="nfs"} 0
node_scrape_collector_success{collector="nfsd"} 0
node_scrape_collector_success{collector="nvme"} 0
node_scrape_collector_success{collector="os"} 0
node_scrape_collector_success{collector="powersupplyclass"} 1
node_scrape_collector_success{collector="pressure"} 1
node_scrape_collector_success{collector="rapl"} 1
node_scrape_collector_success{collector="schedstat"} 1
node_scrape_collector_success{collector="selinux"} 1
node_scrape_collector_success{collector="sockstat"} 1
node_scrape_collector_success{collector="softnet"} 1
node_scrape_collector_success{collector="stat"} 1
node_scrape_collector_success{collector="tapestats"} 0
node_scrape_collector_success{collector="textfile"} 1
node_scrape_collector_success{collector="thermal_zone"} 1
node_scrape_collector_success{collector="time"} 1
node_scrape_collector_success{collector="timex"} 1
node_scrape_collector_success{collector="udp_queues"} 1
node_scrape_collector_success{collector="uname"} 1
node_scrape_collector_success{collector="vmstat"} 1
node_scrape_collector_success{collector="xfs"} 1
node_scrape_collector_success{collector="zfs"} 0
# HELP node_selinux_enabled SELinux is enabled, 1 is true, 0 is false
# TYPE node_selinux_enabled gauge
node_selinux_enabled 0
# HELP node_sockstat_FRAG6_inuse Number of FRAG6 sockets in state inuse.
# TYPE node_sockstat_FRAG6_inuse gauge
node_sockstat_FRAG6_inuse 0
# HELP node_sockstat_FRAG6_memory Number of FRAG6 sockets in state memory.
# TYPE node_sockstat_FRAG6_memory gauge
node_sockstat_FRAG6_memory 0
# HELP node_sockstat_FRAG_inuse Number of FRAG sockets in state inuse.
# TYPE node_sockstat_FRAG_inuse gauge
node_sockstat_FRAG_inuse 0
# HELP node_sockstat_FRAG_memory Number of FRAG sockets in state memory.
# TYPE node_sockstat_FRAG_memory gauge
node_sockstat_FRAG_memory 0
# HELP node_sockstat_RAW6_inuse Number of RAW6 sockets in state inuse.
# TYPE node_sockstat_RAW6_inuse gauge
node_sockstat_RAW6_inuse 0
# HELP node_sockstat_RAW_inuse Number of RAW sockets in state inuse.
# TYPE node_sockstat_RAW_inuse gauge
node_sockstat_RAW_inuse 0
# HELP node_sockstat_TCP6_inuse Number of TCP6 sockets in state inuse.
# TYPE node_sockstat_TCP6_inuse gauge
node_sockstat_TCP6_inuse 2
# HELP node_sockstat_TCP_alloc Number of TCP sockets in state alloc.
# TYPE node_sockstat_TCP_alloc gauge
node_sockstat_TCP_alloc 3847
# HELP node_sockstat_TCP_inuse Number of TCP sockets in state inuse.
# TYPE node_sockstat_TCP_inuse gauge
node_sockstat_TCP_inuse 0
# HELP node_sockstat_TCP_mem Number of TCP sockets in state mem.
# TYPE node_sockstat_TCP_mem gauge
node_sockstat_TCP_mem 0
# HELP node_sockstat_TCP_mem_bytes Number of TCP sockets in state mem_bytes.
# TYPE node_sockstat_TCP_mem_bytes gauge
node_sockstat_TCP_mem_bytes 0
# HELP node_sockstat_TCP_orphan Number of TCP sockets in state orphan.
# TYPE node_sockstat_TCP_orphan gauge
node_sockstat_TCP_orphan 0
# HELP node_sockstat_TCP_tw Number of TCP sockets in state tw.
# TYPE node_sockstat_TCP_tw gauge
node_sockstat_TCP_tw 0
# HELP node_sockstat_UDP6_inuse Number of UDP6 sockets in state inuse.
# TYPE node_sockstat_UDP6_inuse gauge
node_sockstat_UDP6_inuse 0
# HELP node_sockstat_UDPLITE6_inuse Number of UDPLITE6 sockets in state inuse.
# TYPE node_sockstat_UDPLITE6_inuse gauge
node_sockstat_UDPLITE6_inuse 0
# HELP node_sockstat_UDPLITE_inuse Number of UDPLITE sockets in state inuse.
# TYPE node_sockstat_UDPLITE_inuse gauge
node_sockstat_UDPLITE_inuse 0
# HELP node_sockstat_UDP_inuse Number of UDP sockets in state inuse.
# TYPE node_sockstat_UDP_inuse gauge
node_sockstat_UDP_inuse 0
# HELP node_sockstat_UDP_mem Number of UDP sockets in state mem.
# TYPE node_sockstat_UDP_mem gauge
node_sockstat_UDP_mem 250
# HELP node_sockstat_UDP_mem_bytes Number of UDP sockets in state mem_bytes.
# TYPE node_sockstat_UDP_mem_bytes gauge
node_sockstat_UDP_mem_bytes 1.024e+06
# HELP node_sockstat_sockets_used Number of IPv4 sockets in use.
# TYPE node_sockstat_sockets_used gauge
node_sockstat_sockets_used 3
# HELP node_softnet_backlog_len Softnet backlog status
# TYPE node_softnet_backlog_len gauge
node_softnet_backlog_len{cpu="0"} 0
node_softnet_backlog_len{cpu="1"} 0
node_softnet_backlog_len{cpu="2"} 0
node_softnet_backlog_len{cpu="3"} 0
# HELP node_softnet_cpu_collision_total Number of collision occur while obtaining device lock while transmitting
# TYPE node_softnet_cpu_collision_total counter
node_softnet_cpu_collision_total{cpu="0"} 0
node_softnet_cpu_collision_total{cpu="1"} 0
node_softnet_cpu_collision_total{cpu="2"} 0
node_softnet_cpu_collision_total{cpu="3"} 0
# HELP node_softnet_dropped_total Number of dropped packets
# TYPE node_softnet_dropped_total counter
node_softnet_dropped_total{cpu="0"} 0
node_softnet_dropped_total{cpu="1"} 1801
node_softnet_dropped_total{cpu="2"} 0
node_softnet_dropped_total{cpu="3"} 0
# HELP node_softnet_flow_limit_count_total Number of times flow limit has been reached
# TYPE node_softnet_flow_limit_count_total counter
node_softnet_flow_limit_count_total{cpu="0"} 0
node_softnet_flow_limit_count_total{cpu="1"} 0
node_softnet_flow_limit_count_total{cpu="2"} 0
node_softnet_flow_limit_count_total{cpu="3"} 0
# HELP node_softnet_processed_total Number of processed packets
# TYPE node_softnet_processed_total counter
node_softnet_processed_total{cpu="0"} 2.077315232e+09
node_softnet_processed_total{cpu="1"} 4.251448643e+09
node_softnet_processed_total{cpu="2"} 1.992194252e+09
node_softnet_processed_total{cpu="3"} 2.055944643e+09
# HELP node_softnet_received_rps_total Number of times cpu woken up received_rps
# TYPE node_softnet_received_rps_total counter
node_softnet_received_rps_total{cpu="0"} 0
node_softnet_received_rps_total{cpu="1"} 0
node_softnet_received_rps_total{cpu="2"} 0
node_softnet_received_rps_total{cpu="3"} 0
# HELP node_softnet_times_squeezed_total Number of times processing packets ran out of quota
# TYPE node_softnet_times_squeezed_total counter
node_softnet_times_squeezed_total{cpu="0"} 78709
node_softnet_times_squeezed_total{cpu="1"} 566823
node_softnet_times_squeezed_total{cpu="2"} 191274
node_softnet_times_squeezed_total{cpu="3"} 89616
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP node_time_clocksource_available_info Available clocksources read from '/sys/devices/system/clocksource'.
# TYPE node_time_clocksource_available_info gauge
node_time_clocksource_available_info{clocksource="acpi_pm",device="0"} 1
node_time_clocksource_available_info{clocksource="hpet",device="0"} 1
node_time_clocksource_available_info{clocksource="kvm-clock",device="0"} 1
node_time_clocksource_available_info{clocksource="tsc",device="0"} 1
# HELP node_time_clocksource_current_info Current clocksource read from '/sys/devices/system/clocksource'.
# TYPE node_time_clocksource_current_info gauge
node_time_clocksource_current_info{clocksource="kvm-clock",device="0"} 1
# HELP node_time_seconds System time in seconds since epoch (1970).
# TYPE node_time_seconds gauge
node_time_seconds 1.7349253237891104e+09
# HELP node_time_zone_offset_seconds System time zone offset in seconds.
# TYPE node_time_zone_offset_seconds gauge
node_time_zone_offset_seconds{time_zone="UTC"} 0
# HELP node_timex_estimated_error_seconds Estimated error in seconds.
# TYPE node_timex_estimated_error_seconds gauge
node_timex_estimated_error_seconds 0
# HELP node_timex_frequency_adjustment_ratio Local clock frequency adjustment.
# TYPE node_timex_frequency_adjustment_ratio gauge
node_timex_frequency_adjustment_ratio 1.000036348022461
# HELP node_timex_loop_time_constant Phase-locked loop time constant.
# TYPE node_timex_loop_time_constant gauge
node_timex_loop_time_constant 7
# HELP node_timex_maxerror_seconds Maximum error in seconds.
# TYPE node_timex_maxerror_seconds gauge
node_timex_maxerror_seconds 0.03
# HELP node_timex_offset_seconds Time offset in between local system and reference clock.
# TYPE node_timex_offset_seconds gauge
node_timex_offset_seconds 0.000218813
# HELP node_timex_pps_calibration_total Pulse per second count of calibration intervals.
# TYPE node_timex_pps_calibration_total counter
node_timex_pps_calibration_total 0
# HELP node_timex_pps_error_total Pulse per second count of calibration errors.
# TYPE node_timex_pps_error_total counter
node_timex_pps_error_total 0
# HELP node_timex_pps_frequency_hertz Pulse per second frequency.
# TYPE node_timex_pps_frequency_hertz gauge
node_timex_pps_frequency_hertz 0
# HELP node_timex_pps_jitter_seconds Pulse per second jitter.
# TYPE node_timex_pps_jitter_seconds gauge
node_timex_pps_jitter_seconds 0
# HELP node_timex_pps_jitter_total Pulse per second count of jitter limit exceeded events.
# TYPE node_timex_pps_jitter_total counter
node_timex_pps_jitter_total 0
# HELP node_timex_pps_shift_seconds Pulse per second interval duration.
# TYPE node_timex_pps_shift_seconds gauge
node_timex_pps_shift_seconds 0
# HELP node_timex_pps_stability_exceeded_total Pulse per second count of stability limit exceeded events.
# TYPE node_timex_pps_stability_exceeded_total counter
node_timex_pps_stability_exceeded_total 0
# HELP node_timex_pps_stability_hertz Pulse per second stability, average of recent frequency changes.
# TYPE node_timex_pps_stability_hertz gauge
node_timex_pps_stability_hertz 0
# HELP node_timex_status Value of the status array bits.
# TYPE node_timex_status gauge
node_timex_status 8193
# HELP node_timex_sync_status Is clock synchronized to a reliable server (1 = yes, 0 = no).
# TYPE node_timex_sync_status gauge
node_timex_sync_status 1
# HELP node_timex_tai_offset_seconds International Atomic Time (TAI) offset.
# TYPE node_timex_tai_offset_seconds gauge
node_timex_tai_offset_seconds 0
# HELP node_timex_tick_seconds Seconds between clock ticks.
# TYPE node_timex_tick_seconds gauge
node_timex_tick_seconds 0.01
# HELP node_udp_queues Number of allocated memory in the kernel for UDP datagrams in bytes.
# TYPE node_udp_queues gauge
node_udp_queues{ip="v4",queue="rx"} 0
node_udp_queues{ip="v4",queue="tx"} 0
node_udp_queues{ip="v6",queue="rx"} 0
node_udp_queues{ip="v6",queue="tx"} 0
# HELP node_uname_info Labeled system information as provided by the uname system call.
# TYPE node_uname_info gauge
node_uname_info{domainname="(none)",machine="x86_64",nodename="node-exporter-tbsnw",release="6.1.0-17-amd64",sysname="Linux",version="#1 SMP PREEMPT_DYNAMIC Debian 6.1.69-1 (2023-12-30)"} 1
# HELP node_vmstat_oom_kill /proc/vmstat information field oom_kill.
# TYPE node_vmstat_oom_kill untyped
node_vmstat_oom_kill 0
# HELP node_vmstat_pgfault /proc/vmstat information field pgfault.
# TYPE node_vmstat_pgfault untyped
node_vmstat_pgfault 1.159507384e+09
# HELP node_vmstat_pgmajfault /proc/vmstat information field pgmajfault.
# TYPE node_vmstat_pgmajfault untyped
node_vmstat_pgmajfault 3124
# HELP node_vmstat_pgpgin /proc/vmstat information field pgpgin.
# TYPE node_vmstat_pgpgin untyped
node_vmstat_pgpgin 566180
# HELP node_vmstat_pgpgout /proc/vmstat information field pgpgout.
# TYPE node_vmstat_pgpgout untyped
node_vmstat_pgpgout 3.7356089e+07
# HELP node_vmstat_pswpin /proc/vmstat information field pswpin.
# TYPE node_vmstat_pswpin untyped
node_vmstat_pswpin 0
# HELP node_vmstat_pswpout /proc/vmstat information field pswpout.
# TYPE node_vmstat_pswpout untyped
node_vmstat_pswpout 0
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 310.59
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 9
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 2.8442624e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.73026764026e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 7.43645184e+08
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes 1.8446744073709552e+19
# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors_total counter
promhttp_metric_handler_errors_total{cause="encoding"} 0
promhttp_metric_handler_errors_total{cause="gathering"} 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 8620
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0