Prometheus API Query


Listed below is the mapping of BHCO metrics to Prometheus API queries. Derived metrics are not included. Only the core query calculation is listed; sums by different entities are not shown in this list.

Some metrics are slightly different in different versions of Prometheus exporters. Here only the old version of each metric is listed. For example, for node-exporter: node_memory_MemActive (old version) → node_memory_MemActive_bytes (newer version); for kubelet: container_cpu_usage_seconds (old version) → container_cpu_usage_seconds_total (newer version).

In the ETL, if the listed (old-version) metrics return empty results, it will fall back to querying the newer-version metric names.

Entity | Metric | Prometheus Query | Component
ClusterCPU_NUMsum (count by (instance) (sum by(instance, cpu) (node_cpu))  ) node-exporter
ClusterCPU_NUMsum(count by (instance) (sum by (cpu, instance) (node_cpu_seconds_total))) node-exporter
ClusterCPU_NUMsum( kube_node_status_capacity_cpu_cores) kube-state-metric
ClusterCPU_NUMsum(kube_node_status_capacity{resource="cpu", unit="core" })kube-state-metric
ClusterCPU_REQUEST_MAXsum(kube_resourcequota{resource =~ ".*requests.cpu|cpu.*", type="hard" })kube-state-metric
ClusterST_REQUEST_MAXsum(kube_resourcequota{resource =~ ".*requests.storage.*", type="hard" })kube-state-metric
ClusterTOTAL_FS_SIZEsum (avg_over_time(node_filesystem_size{mountpoint="/" }[5m]))node-exporter
ClusterTOTAL_FS_SIZEsum (avg_over_time(node_filesystem_size_bytes{mountpoint="/" }[5m]))node-exporter
ClusterKPOD_NUM_MAXsum(kube_node_status_capacity{resource="pods" })kube-state-metric
ClusterKPOD_NUM_MAXsum( kube_node_status_capacity_pods) by (node)kube-state-metric
ClusterMEM_REQUEST_MAXsum(kube_resourcequota{resource =~ ".*requests.memory|memory.*", type="hard" })kube-state-metric
ClusterTOTAL_REAL_MEMsum(kube_node_status_capacity{resource="memory" })kube-state-metric
ClusterTOTAL_REAL_MEMsum( kube_node_status_capacity_memory_bytes) by (node)kube-state-metric
ClusterCPU_LIMIT_MAXsum(kube_resourcequota{resource =~ ".*limits.cpu.*", type="hard" })kube-state-metric
ClusterST_LIMIT_MAXsum(kube_resourcequota{resource =~ ".*limits.storage.*", type="hard" })kube-state-metric
ClusterKUBERNETES_VERSIONsum(kube_node_info) by (node, kubelet_version)kube-state-metric
ClusterMEM_LIMIT_MAXsum(kube_resourcequota{resource =~ ".*limits.memory.*", type="hard" }) kube-state-metric
ClusterMEM_REAL_UTIL(sum(avg_over_time(node_memory_MemTotal[5m])-avg_over_time(node_memory_MemFree[5m])-avg_over_time(node_memory_KernelStack[5m])-avg_over_time(node_memory_Cached[5m])-avg_over_time(node_memory_Buffers[5m])-avg_over_time(node_memory_Slab[5m])))  / (sum (avg_over_time(node_memory_MemTotal[5m]))) node-exporter
ClusterMEM_REAL_UTIL(sum(avg_over_time(node_memory_MemTotal_bytes[5m])-avg_over_time(node_memory_MemFree_bytes[5m])-avg_over_time(node_memory_KernelStack_bytes[5m])-avg_over_time(node_memory_Cached_bytes[5m])-avg_over_time(node_memory_Buffers_bytes[5m])-avg_over_time(node_memory_Slab_bytes[5m])))  / (sum (avg_over_time(node_memory_MemTotal_bytes[5m]))) node-exporter
ClusterCPU_LIMITsum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterCPU_LIMITsum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterCPU_LIMITsum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterCPU_LIMITsum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterTOTAL_FS_UTIL(1-(sum(avg_over_time(node_filesystem_free{mountpoint="/" }[5m]))/sum(avg_over_time(node_filesystem_size{mountpoint="/" }[5m]))))node-exporter
ClusterTOTAL_FS_UTIL(1-(sum(avg_over_time(node_filesystem_free_bytes{mountpoint="/" }[5m]))/sum(avg_over_time(node_filesystem_size_bytes{mountpoint="/" }[5m]))))node-exporter
ClusterKPOD_NUMcount(kube_pod_status_phase{phase="Running" }==1)kube-state-metric
ClusterCONTAINER_NUMcount (kube_pod_container_info{image!="" } * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterCPU_REQUESTsum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterCPU_REQUESTsum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterCPU_REQUESTsum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterCPU_REQUESTsum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterSERVICE_NUMcount(kube_service_info )kube-state-metric
ClusterMEM_REAL_USED(sum(avg_over_time(node_memory_MemTotal[5m]))) - (sum((avg_over_time(node_memory_MemFree[5m])+avg_over_time(node_memory_KernelStack[5m])+avg_over_time(node_memory_Cached[5m])+avg_over_time(node_memory_Buffers[5m])+avg_over_time(node_memory_Slab[5m]))))node-exporter
ClusterMEM_REAL_USED(sum(max_over_time(node_memory_MemTotal[5m]))) - (sum((min_over_time(node_memory_MemFree[5m])+min_over_time(node_memory_KernelStack[5m])+min_over_time(node_memory_Cached[5m])+min_over_time(node_memory_Buffers[5m])+min_over_time(node_memory_Slab[5m]))))node-exporter
ClusterMEM_REAL_USED(sum(min_over_time(node_memory_MemTotal[5m]))) - (sum((max_over_time(node_memory_MemFree[5m])+max_over_time(node_memory_KernelStack[5m])+max_over_time(node_memory_Cached[5m])+max_over_time(node_memory_Buffers[5m])+max_over_time(node_memory_Slab[5m]))))node-exporter
ClusterMEM_REAL_USED(sum (avg_over_time(node_memory_MemTotal_bytes[5m]))) - (sum(avg_over_time(node_memory_MemFree_bytes[5m])+avg_over_time(node_memory_KernelStack_bytes[5m])+avg_over_time(node_memory_Cached_bytes[5m])+avg_over_time(node_memory_Buffers_bytes[5m])+avg_over_time(node_memory_Slab_bytes[5m])))node-exporter
ClusterMEM_REAL_USED(sum (max_over_time(node_memory_MemTotal_bytes[5m]))) - (sum(min_over_time(node_memory_MemFree_bytes[5m])+min_over_time(node_memory_KernelStack_bytes[5m])+min_over_time(node_memory_Cached_bytes[5m])+min_over_time(node_memory_Buffers_bytes[5m])+min_over_time(node_memory_Slab_bytes[5m])))node-exporter
ClusterMEM_REAL_USED(sum (min_over_time(node_memory_MemTotal_bytes[5m]))) - (sum(max_over_time(node_memory_MemFree_bytes[5m])+max_over_time(node_memory_KernelStack_bytes[5m])+max_over_time(node_memory_Cached_bytes[5m])+max_over_time(node_memory_Buffers_bytes[5m])+max_over_time(node_memory_Slab_bytes[5m])))node-exporter
ClusterTOTAL_FS_USEDsum (avg_over_time(node_filesystem_size{mountpoint="/" }[5m])-avg_over_time(node_filesystem_free{mountpoint="/" }[5m]))node-exporter
ClusterTOTAL_FS_USEDsum (avg_over_time(node_filesystem_size_bytes{mountpoint="/" }[5m])-avg_over_time(node_filesystem_free_bytes{mountpoint="/"}[5m]))node-exporter
ClusterTOTAL_FS_FREEsum (avg_over_time(node_filesystem_free{mountpoint="/" }[5m]))node-exporter
ClusterTOTAL_FS_FREEsum (avg_over_time(node_filesystem_free_bytes{mountpoint="/" }[5m]))node-exporter
ClusterBYSTATUS_KPOD_NUMcount by (phase) (kube_pod_status_phase ==1)kube-state-metric
ClusterCPU_UTILsum(rate(node_cpu{mode!="idle",mode!="iowait" }[5m])) / sum(count by(instance)(sum by(instance,cpu)(node_cpu)))node-exporter
ClusterCPU_UTILsum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait" }[5m])) / sum(count by(instance)(sum by(cpu, instance)(node_cpu_seconds_total)))node-exporter
ClusterMEM_USED(sum(avg_over_time(node_memory_MemTotal[5m]) )) - (sum(avg_over_time(node_memory_MemFree[5m]) + avg_over_time(node_memory_KernelStack[5m])))node-exporter
ClusterMEM_USED(sum(max_over_time(node_memory_MemTotal[5m]) )) - (sum(min_over_time(node_memory_MemFree[5m]) + min_over_time(node_memory_KernelStack[5m])))node-exporter
ClusterMEM_USED(sum(min_over_time(node_memory_MemTotal[5m]) )) - (sum(max_over_time(node_memory_MemFree[5m]) + max_over_time(node_memory_KernelStack[5m])))node-exporter
ClusterMEM_USEDsum((sum(avg_over_time(node_memory_MemTotal_bytes[5m]) ) by (instance)) - (sum(avg_over_time(node_memory_MemFree_bytes[5m]) + avg_over_time(node_memory_KernelStack_bytes[5m])) by (instance)))node-exporter
ClusterMEM_USEDsum((sum(max_over_time(node_memory_MemTotal_bytes[5m]) ) by (instance)) - (sum(min_over_time(node_memory_MemFree_bytes[5m]) + min_over_time(node_memory_KernelStack_bytes[5m])) by (instance)))node-exporter
ClusterMEM_USEDsum((sum(min_over_time(node_memory_MemTotal_bytes[5m]) ) by (instance)) - (sum(max_over_time(node_memory_MemFree_bytes[5m]) + max_over_time(node_memory_KernelStack_bytes[5m])) by (instance)))node-exporter
ClusterMEM_UTIL1 - sum ((avg_over_time(node_memory_MemFree[5m]) + avg_over_time(node_memory_KernelStack[5m]) )) / sum (avg_over_time(node_memory_MemTotal[5m]))node-exporter
ClusterMEM_UTIL1 - sum ((avg_over_time(node_memory_MemFree_bytes[5m]) + avg_over_time(node_memory_KernelStack_bytes[5m]) )) / sum (avg_over_time(node_memory_MemTotal_bytes[5m]))node-exporter
ClusterST_ALLOCATEDsum(avg_over_time(kube_persistentvolumeclaim_resource_requests_storage_bytes[5m]))kube-state-metric
ClusterMEM_REQUEST_ALLOCATABLE(sum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))/(sum( avg_over_time(kube_node_status_allocatable{resource="memory" }[5m])))kube-state-metric
ClusterMEM_REQUEST_ALLOCATABLE(sum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))/(sum( avg_over_time(kube_node_status_allocatable{resource="memory" }[5m])))kube-state-metric
ClusterMEM_ACTIVEsum (avg_over_time(node_memory_Active[5m]))node-exporter
ClusterMEM_ACTIVEsum (max_over_time(node_memory_Active[5m]))node-exporter
ClusterMEM_ACTIVEsum (avg_over_time(node_memory_Active_bytes[5m]))node-exporter
ClusterMEM_ACTIVEsum (max_over_time(node_memory_Active_bytes[5m]))node-exporter
ClusterCPU_REQUEST_ALLOCATABLE(sum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))/(sum( avg_over_time(kube_node_status_allocatable{resource="cpu" }[5m])))kube-state-metric
ClusterCPU_REQUEST_ALLOCATABLE(sum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))/(sum( max_over_time(kube_node_status_allocatable{resource="cpu" }[5m])))kube-state-metric
ClusterCPU_REQUEST_ALLOCATABLE(sum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))/(sum( avg_over_time(kube_node_status_allocatable{resource="cpu" }[5m])))kube-state-metric
ClusterCPU_REQUEST_ALLOCATABLE(sum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))/(sum( max_over_time(kube_node_status_allocatable{resource="cpu" }[5m])))kube-state-metric
ClusterMEM_PAGE_MAJOR_FAULT_RATEsum(rate(node_vmstat_pgmajfault[5m]))node-exporter
ClusterCONTROLLER_NUMcount((kube_daemonset_created or kube_statefulset_created or kube_replicaset_created or kube_replicationcontroller_created))kube-state-metric
ClusterCONTROLLER_NUMcount((kube_daemonset_labels or kube_statefulset_labels or kube_replicaset_labels or kube_replicationcontroller_labels))kube-state-metric
ClusterCPU_USED_NUMsum(rate(node_cpu{mode!="idle",mode!="iowait" }[5m]))node-exporter
ClusterCPU_USED_NUMsum(max_over_time((rate(node_cpu{mode!="idle",mode!="iowait" }[2m]))[5m:]))node-exporter
ClusterCPU_USED_NUMsum(min_over_time((rate(node_cpu{mode!="idle",mode!="iowait" }[2m]))[5m:]))node-exporter
ClusterCPU_USED_NUMsum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait" }[5m]))node-exporter
ClusterCPU_USED_NUMsum(max_over_time((rate(node_cpu_seconds_total{mode!="idle",mode!="iowait" }[2m]))[5m:]))node-exporter
ClusterCPU_USED_NUMsum(min_over_time((rate(node_cpu_seconds_total{mode!="idle",mode!="iowait" }[2m]))[5m:]))node-exporter
ClusterCPU_ALLOCATABLEsum( avg_over_time(kube_node_status_allocatable{resource="cpu" }[5m]))kube-state-metric
ClusterSECRET_NUMcount(kube_secret_info)kube-state-metric
ClusterJOB_NUMcount (kube_job_info)kube-state-metric
ClusterMEM_REQUESTsum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterMEM_REQUESTsum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterMEM_REQUESTsum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterMEM_REQUESTsum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterMEM_KLIMITsum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterMEM_KLIMITsum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterMEM_KLIMITsum (max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterMEM_KLIMITsum (max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
ClusterMEMORY_ALLOCATABLEsum( avg_over_time(kube_node_status_allocatable{resource="memory" }[5m]))kube-state-metric
NodeCPU_NUMsum by (node) ((count by (instance) (sum by(instance, cpu) (node_cpu))  ) * on (instance ) group_left(node) (label_replace(node_uname_info, "node", "$1", "nodename", "(.*)")))node-exporter
NodeCPU_NUMsum( kube_node_status_capacity_cpu_cores) by (node)kube-state-metric
NodeCPU_NUMsum by (node)(kube_node_status_capacity{resource="cpu", unit="core" })kube-state-metric
NodeCPU_NUM(count by (instance) (sum by (cpu, instance) (node_cpu_seconds_total))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")  node-exporter
NodeTOTAL_FS_SIZE(sum by (instance) (avg_over_time(node_filesystem_size{mountpoint="/" }[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeTOTAL_FS_SIZE(sum by (instance) (avg_over_time(node_filesystem_size_bytes{mountpoint="/" }[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeKPOD_NUM_MAXsum( kube_node_status_capacity{resource="pods" }) by (node)kube-state-metric
NodeKPOD_NUM_MAXsum( kube_node_status_capacity_pods) by (node)kube-state-metric
NodeCREATION_TIMEsum(kube_node_created) by (node)kube-state-metric
NodeOS_TYPEcount(kube_node_info) by (node, os_image)kube-state-metric
NodeMAINTENANCE_MODEsum(kube_node_spec_unschedulable) by (node) kube-state-metric
NodeTOTAL_REAL_MEMsum(kube_node_status_capacity{resource="memory" }) by (node)kube-state-metric
NodeTOTAL_REAL_MEMsum( kube_node_status_capacity_memory_bytes) by (node)kube-state-metric
NodeKUBERNETES_VERSIONsum(kube_node_info) by (node, kubelet_version)kube-state-metric
NodeMEM_REAL_UTIL(sum by (instance) (avg_over_time(node_memory_MemTotal[5m])-avg_over_time(node_memory_MemFree[5m])-avg_over_time(node_memory_KernelStack[5m])-avg_over_time(node_memory_Cached[5m])-avg_over_time(node_memory_Buffers[5m])-avg_over_time(node_memory_Slab[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")  / ((sum by (instance) (avg_over_time(node_memory_MemTotal[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")  )node-exporter
NodeMEM_REAL_UTIL((sum by (instance) (avg_over_time(node_memory_MemTotal_bytes[5m])-avg_over_time(node_memory_MemFree_bytes[5m])-avg_over_time(node_memory_KernelStack_bytes[5m])-avg_over_time(node_memory_Cached_bytes[5m])-avg_over_time(node_memory_Buffers_bytes[5m])-avg_over_time(node_memory_Slab_bytes[5m]))) /(sum by (instance) (avg_over_time(node_memory_MemTotal_bytes[5m])))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeCPU_LIMITsum by (node)(max by (container, namespace, pod, node)(avg_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (container, namespace, pod, node) group_right() (sum by (pod, namespace, container, node) (kube_pod_container_info * on (uid, pod, namespace) group_left(node) kube_pod_info) * on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)))kube-state-metric
NodeCPU_LIMITsum by (node)(max by (container, namespace, pod, node)(max_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (container, namespace, pod, node) group_right() (sum by (pod, namespace, container, node) (kube_pod_container_info * on (uid, pod, namespace) group_left(node) kube_pod_info) * on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)))kube-state-metric
NodeCPU_LIMITsum by (node)(max by (pod, namespace, node, container)(avg_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NodeCPU_LIMITsum by (node)(max by (pod, namespace, node, container)(max_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NodeNET_OUT_BIT_RATE(sum by (instance) (rate(node_network_transmit_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_OUT_BIT_RATE(sum by (instance) (rate(node_network_transmit_bytes_total[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_IN_ERROR_RATE(sum by (instance) (rate(node_network_receive_errs[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_IN_ERROR_RATE(sum by (instance) (rate(node_network_receive_errs_total[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeTOTAL_FS_UTIL(sum by (instance) (1-(avg_over_time(node_filesystem_free{mountpoint="/" }[5m])/avg_over_time(node_filesystem_size{mountpoint="/" }[5m])))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeTOTAL_FS_UTIL(sum by (instance) (1-(avg_over_time(node_filesystem_free_bytes{mountpoint="/"}[5m]) / avg_over_time(node_filesystem_size_bytes{mountpoint="/"}[5m])))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeKPOD_NUMcount(max by (pod, namespace, node) (kube_pod_info) * on (pod, namespace) group_left() kube_pod_status_phase{phase="Running" }==1) by (node)kube-state-metric
NodeCONTAINER_NUMcount by (node) ((max by (pod, namespace, image, container, node) (kube_pod_container_info * on (uid, namespace, pod) group_left(node) kube_pod_info)) * on (namespace, pod, node) group_left() (max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_right() max by (node, pod, namespace) (kube_pod_info)))kube-state-metric
NodeCPU_REQUESTsum by (node)(max by (container, namespace, pod, node)(avg_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (container, namespace, pod, node) group_right() (sum by (pod, namespace, container, node) (kube_pod_container_info * on (uid, pod, namespace) group_left(node) kube_pod_info) * on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)))kube-state-metric
NodeCPU_REQUESTsum by (node)(max by (container, namespace, pod, node)(max_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (container, namespace, pod, node) group_right() (sum by (pod, namespace, container, node) (kube_pod_container_info * on (uid, pod, namespace) group_left(node) kube_pod_info) * on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)))kube-state-metric
NodeCPU_REQUESTsum by (node)(max by (pod, namespace, node, container)(avg_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NodeCPU_REQUESTsum by (node)(max by (pod, namespace, node, container)(max_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NodeNET_OUT_BYTE_RATE(sum by (instance) (rate(node_network_transmit_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_OUT_BYTE_RATE(sum by (instance) (rate(node_network_transmit_bytes_total[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_REAL_USED(sum by (instance) (avg_over_time(node_memory_MemTotal[5m])-avg_over_time(node_memory_MemFree[5m])-avg_over_time(node_memory_KernelStack[5m])-avg_over_time(node_memory_Cached[5m])-avg_over_time(node_memory_Buffers[5m])-avg_over_time(node_memory_Slab[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_REAL_USED(sum by (instance) (max_over_time(node_memory_MemTotal[5m])-min_over_time(node_memory_MemFree[5m])-min_over_time(node_memory_KernelStack[5m])-min_over_time(node_memory_Cached[5m])-min_over_time(node_memory_Buffers[5m])-min_over_time(node_memory_Slab[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_REAL_USED(sum by (instance) (min_over_time(node_memory_MemTotal[5m])-max_over_time(node_memory_MemFree[5m])-max_over_time(node_memory_KernelStack[5m])-max_over_time(node_memory_Cached[5m])-max_over_time(node_memory_Buffers[5m])-max_over_time(node_memory_Slab[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_REAL_USED(sum by (instance) (avg_over_time(node_memory_MemTotal_bytes[5m])-avg_over_time(node_memory_MemFree_bytes[5m])-avg_over_time(node_memory_KernelStack_bytes[5m])-avg_over_time(node_memory_Cached_bytes[5m])-avg_over_time(node_memory_Buffers_bytes[5m])-avg_over_time(node_memory_Slab_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_REAL_USED(sum by (instance) (max_over_time(node_memory_MemTotal_bytes[5m])-min_over_time(node_memory_MemFree_bytes[5m])-min_over_time(node_memory_KernelStack_bytes[5m])-min_over_time(node_memory_Cached_bytes[5m])-min_over_time(node_memory_Buffers_bytes[5m])-min_over_time(node_memory_Slab_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_REAL_USED(sum by (instance) (min_over_time(node_memory_MemTotal_bytes[5m])-max_over_time(node_memory_MemFree_bytes[5m])-max_over_time(node_memory_KernelStack_bytes[5m])-max_over_time(node_memory_Cached_bytes[5m])-max_over_time(node_memory_Buffers_bytes[5m])-max_over_time(node_memory_Slab_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeTOTAL_FS_USED(sum by (instance) (avg_over_time(node_filesystem_size{mountpoint="/" }[5m])-avg_over_time(node_filesystem_free{mountpoint="/" }[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeTOTAL_FS_USED(sum by (instance) (avg_over_time(node_filesystem_size_bytes{mountpoint="/" }[5m])-avg_over_time(node_filesystem_free_bytes{mountpoint="/" }[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeTOTAL_FS_FREE(sum by (instance) (avg_over_time(node_filesystem_free{mountpoint="/" }[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeTOTAL_FS_FREE(sum by (instance) (avg_over_time(node_filesystem_free_bytes{mountpoint="/" }[5m]) )) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeBYSTATUS_KPOD_NUMcount by (phase, node) ((sum by (pod, namespace, phase) (kube_pod_status_phase==1))* on (namespace, pod) group_left(node) (max by(namespace, pod) (kube_pod_status_phase==1) * on (namespace, pod) group_left(node) max by (node, pod, namespace) (kube_pod_info)))kube-state-metric
NodeCPU_UTILsum by (node)((sum by (instance) (rate(node_cpu{mode!="idle", mode!="iowait" }[5m])))  * on (instance ) group_left(node) label_replace(node_uname_info, "node", "$1", "nodename", "(.*)")) /sum by (node)((count by (instance) (sum by(instance, cpu) (node_cpu)) ) * on (instance ) group_left(node) label_replace(node_uname_info, "node", "$1", "nodename", "(.*)"))node-exporter
NodeCPU_UTIL(sum by (instance) (rate(node_cpu_seconds_total{mode!="idle", mode!="iowait" }[5m])))  / (count by (instance) (sum by (cpu, instance) (node_cpu_seconds_total))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")  node-exporter
NodeMEM_USEDsum by (node)((sum by(instance) (avg_over_time(node_memory_MemTotal[5m]) ) - (sum by(instance) (avg_over_time(node_memory_MemFree[5m]) + avg_over_time(node_memory_KernelStack[5m]))))* on (instance) group_left(node) (label_replace(node_uname_info, "node", "$1", "nodename", "(.*)")))node-exporter
NodeMEM_USEDsum by (node)((sum by(instance) (max_over_time(node_memory_MemTotal[5m]) ) - (sum by(instance) (min_over_time(node_memory_MemFree[5m]) + min_over_time(node_memory_KernelStack[5m]))))* on (instance) group_left(node) (label_replace(node_uname_info, "node", "$1", "nodename", "(.*)")))node-exporter
NodeMEM_USEDsum by (node)((sum by(instance) (min_over_time(node_memory_MemTotal[5m]) ) - (sum by(instance) (max_over_time(node_memory_MemFree[5m]) + max_over_time(node_memory_KernelStack[5m]))))* on (instance) group_left(node) (label_replace(node_uname_info, "node", "$1", "nodename", "(.*)")))node-exporter
NodeMEM_USED(sum by (instance) (avg_over_time(node_memory_MemTotal_bytes[5m])-avg_over_time(node_memory_MemFree_bytes[5m]) - avg_over_time(node_memory_KernelStack_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_USED(sum by (instance) (max_over_time(node_memory_MemTotal_bytes[5m])-min_over_time(node_memory_MemFree_bytes[5m]) - min_over_time(node_memory_KernelStack_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_USED(sum by (instance) (min_over_time(node_memory_MemTotal_bytes[5m])-max_over_time(node_memory_MemFree_bytes[5m]) - max_over_time(node_memory_KernelStack_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_OUT_ERROR_RATE(sum by (instance) (rate(node_network_transmit_errs[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_OUT_ERROR_RATE(sum by (instance) (rate(node_network_transmit_errs_total[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_BIT_RATE(sum by (instance) (rate(node_network_receive_bytes[5m])+rate(node_network_transmit_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_BIT_RATE(sum by (instance) (rate(node_network_receive_bytes_total[5m])+rate(node_network_transmit_bytes_total[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_UTIL  sum by (node)(1- (sum by(instance) (avg_over_time(node_memory_MemFree[5m]) +  avg_over_time(node_memory_KernelStack[5m])))/(sum by(instance) (avg_over_time(node_memory_MemTotal[5m]) ))* on (instance) group_left(node) (label_replace(node_uname_info, "node", "$1", "nodename", "(.*)")))node-exporter
NodeMEM_UTIL(1- (sum by (instance) (avg_over_time(node_memory_MemFree_bytes[5m]) + avg_over_time(node_memory_KernelStack_bytes[5m]))) /(sum by (instance) (avg_over_time(node_memory_MemTotal_bytes[5m])))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeMEM_REQUEST_ALLOCATABLE(sum by (node)((max by (container, image,namespace, pod, node)(avg_over_time(kube_pod_container_resource_requests{resource="memory" }[5m]))) * on (container, namespace, pod, node) group_right() ((sum by (pod, namespace, image, container, node) (kube_pod_container_info * on (uid, namespace, pod) group_left(node) kube_pod_info))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1))))/(sum( avg_over_time(kube_node_status_allocatable{resource="memory" }[5m])) by (node))kube-state-metric
NodeMEM_REQUEST_ALLOCATABLE(sum by (node)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))/(sum( avg_over_time(kube_node_status_allocatable{resource="memory" }[5m])) by (node))kube-state-metric
NodeLOAD_AVGmax_over_time(instance:node_load1_per_cpu:ratio[5m])  * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeLOAD_AVG(sum by (node) ((max by (instance)(max_over_time(node_load1[5m]))/(count by (instance) (sum by(instance, cpu) (node_cpu)) ) ) * on (instance ) group_left(node) (label_replace(node_uname_info, "node", "$1", "nodename", "(.*)"))))node-exporter
NodeLOAD_AVG sum by (node)( label_replace(max_over_time(node_load1[5m]), "node", "$1", "instance", "(.*)")) / (sum by (node)(kube_node_status_capacity_cpu_cores))node-exporter
NodeLOAD_AVG sum by (node)( label_replace(max_over_time(node_load1[5m]), "node", "$1", "instance", "(.*)")) / (sum by (node)(kube_node_status_capacity{resource="cpu", unit="core" }))node-exporter
NodeLOAD_AVG(sum by (node) ((max by (instance)(max_over_time(node_load1[5m]))/(count by (instance) (sum by(instance, cpu) (node_cpu_seconds_total)) ) ) * on (instance ) group_left(node) (label_replace(node_uname_info, "node", "$1", "nodename", "(.*)"))))node-exporter
NodeMEM_ACTIVE(sum by (instance) (avg_over_time(node_memory_Active[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeMEM_ACTIVE(sum by (instance) (max_over_time(node_memory_Active[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeMEM_ACTIVE(sum by (instance) (avg_over_time(node_memory_Active_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_ACTIVE(sum by (instance) (max_over_time(node_memory_Active_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeCPU_REQUEST_ALLOCATABLE(sum by (node)((max by (container, image,namespace, pod, node)(avg_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m]))) * on (container, namespace, pod, node) group_right() ((sum by (pod, namespace, image, container, node) (kube_pod_container_info * on (uid, namespace, pod) group_left(node) kube_pod_info))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1))))/(sum( avg_over_time(kube_node_status_allocatable{resource="cpu" }[5m])) by (node))kube-state-metric
NodeCPU_REQUEST_ALLOCATABLE(sum by (node)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))/(sum( avg_over_time(kube_node_status_allocatable{resource="cpu" }[5m])) by (node))kube-state-metric
NodeMEM_PAGE_MAJOR_FAULT_RATE(sum by (instance) (rate(node_vmstat_pgmajfault[5m])))* on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeCPU_USED_NUMsum by (node)((sum by (instance) (rate(node_cpu{mode!="idle", mode!="iowait" }[5m])))  * on (instance ) group_left(node) label_replace(node_uname_info, "node", "$1", "nodename", "(.*)"))node-exporter
NodeCPU_USED_NUMsum by (node)((sum by (instance) (max_over_time((rate(node_cpu{mode!="idle", mode!="iowait" }[2m]))[5m:])))  * on (instance ) group_left(node) label_replace(node_uname_info, "node", "$1", "nodename", "(.*)"))node-exporter
NodeCPU_USED_NUMsum by (node)((sum by (instance) (min_over_time((rate(node_cpu{mode!="idle", mode!="iowait" }[2m]))[5m:])))  * on (instance ) group_left(node) label_replace(node_uname_info, "node", "$1", "nodename", "(.*)"))node-exporter
NodeCPU_USED_NUM(sum by (instance) (rate(node_cpu_seconds_total{mode!="idle", mode!="iowait" }[5m]) )) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeCPU_USED_NUM(sum by (instance) (max_over_time((rate(node_cpu_seconds_total{mode!="idle", mode!="iowait" }[2m]))[5m:]) )) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeCPU_USED_NUM(sum by (instance) (min_over_time((rate(node_cpu_seconds_total{mode!="idle", mode!="iowait" }[2m]))[5m:]) )) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeCPU_ALLOCATABLEsum( avg_over_time(kube_node_status_allocatable{resource="cpu" }[5m])) by (node)kube-state-metric
NodeMEM_REQUESTsum by (node)((max by (container, image,namespace, pod, node)(avg_over_time(kube_pod_container_resource_requests{resource="memory" }[5m]))) * on (container, namespace, pod, node) group_right() ((sum by (pod, namespace, image, container, node) (kube_pod_container_info * on (uid, namespace, pod) group_left(node) kube_pod_info))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)))kube-state-metric
NodeMEM_REQUESTsum by (node)((max by (container, image,namespace, pod, node)(max_over_time(kube_pod_container_resource_requests{resource="memory" }[5m]))) * on (container, namespace, pod, node) group_right() ((sum by (pod, namespace, image, container, node) (kube_pod_container_info * on (uid, namespace, pod) group_left(node) kube_pod_info))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)))kube-state-metric
NodeMEM_REQUESTsum by (node)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NodeMEM_REQUESTsum by (node)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NodeUPTIME(sum by (instance) (time() - node_boot_time)) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeUPTIME(sum by (instance) (time() - node_boot_time_seconds)) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)") node-exporter
NodeNET_IN_BYTE_RATE(sum by (instance) (rate(node_network_receive_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_IN_BYTE_RATE(sum by (instance) (rate(node_network_receive_bytes_total[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEM_KLIMITsum by (node)((max by (container, image,namespace, pod, node)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m]))) * on (container, namespace, pod, node) group_right() ((sum by (pod, namespace, image, container, node) (kube_pod_container_info * on (uid, namespace, pod) group_left(node) kube_pod_info))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)))kube-state-metric
NodeMEM_KLIMITsum by (node)((max by (container, image,namespace, pod, node)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m]))) * on (container, namespace, pod, node) group_right() ((sum by (pod, namespace, image, container, node) (kube_pod_container_info * on (uid, namespace, pod) group_left(node) kube_pod_info))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)))kube-state-metric
NodeMEM_KLIMITsum by (node)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NodeMEM_KLIMITsum by (node)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NodeNET_IN_BIT_RATE(sum by (instance) (rate(node_network_receive_bytes[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeNET_IN_BIT_RATE(sum by (instance) (rate(node_network_receive_bytes_total[5m]))) * on (instance) group_left(node) label_replace(node_uname_info,"node",  "$1", "nodename", "(.*)")node-exporter
NodeMEMORY_ALLOCATABLEsum( avg_over_time(kube_node_status_allocatable{resource="memory" }[5m])) by (node)kube-state-metric
Pod WorkloadCREATION_TIMEsum(kube_pod_created  ) by (namespace, pod)kube-state-metric
Pod WorkloadHOST_NAMEsum(kube_pod_info  ) by (namespace, pod, node)kube-state-metric
Pod WorkloadKPOD_STATUSsum by (namespace,phase, pod) (kube_pod_status_phase  )kube-state-metric
NamespaceCPU_REQUEST_MAXsum(kube_resourcequota{resource =~ ".*requests.cpu|cpu.*", type="hard" }) by (namespace)kube-state-metric
NamespaceST_REQUEST_MAXsum(kube_resourcequota{resource =~ ".*requests.storage.*", type="hard" }) by (namespace)kube-state-metric
NamespaceCREATION_TIMEsum(kube_namespace_created) by (namespace)kube-state-metric
NamespaceKPOD_NUM_MAXsum(kube_resourcequota{resource =~ ".*pods.*", type="hard" }) by (namespace)kube-state-metric
NamespaceKPOD_NUM_MAXsum(kube_node_status_capacity{resource=~".*pods.*" }) by (namespace)kube-state-metric
NamespaceMEM_REQUEST_MAXsum(kube_resourcequota{resource =~ ".*requests.memory|memory.*", type="hard" }) by (namespace)kube-state-metric
NamespaceCPU_LIMITRANGESsum(kube_limitrange{resource=~".*cpu.*",type='Pod',constraint='max' }) by (namespace)kube-state-metric
NamespaceMEM_LIMITRANGESsum(kube_limitrange{resource=~".*memory.*",type='Pod',constraint='max' }) by (namespace)kube-state-metric
NamespaceCPU_LIMIT_MAXsum(kube_resourcequota{resource =~ ".*limits.cpu.*", type="hard" }) by (namespace)kube-state-metric
NamespaceST_LIMIT_MAXsum(kube_resourcequota{resource =~ ".*limits.storage.*", type="hard" }) by (namespace)kube-state-metric
NamespaceMEM_LIMIT_MAXsum(kube_resourcequota{resource =~ ".*limits.memory.*", type="hard" }) by (namespace)kube-state-metric
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )/ sum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTILsum(max by (container, namespace, pod)((avg_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace) / sum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTILsum(max by (container, namespace, pod)((max_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace) / sum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTILsum(max by (container, namespace, pod)((avg_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace) / sum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTILsum(max by (container, namespace, pod)((max_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)/ sum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )/ sum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) ) / sum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceMEM_REAL_UTIL(sum by ( namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )/ sum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceCPU_LIMITsum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceCPU_LIMITsum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceCPU_LIMITsum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceCPU_LIMITsum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceNET_OUT_BIT_RATEsum by (namespace)((max by (namespace, pod) (label_replace(rate(container_network_transmit_bytes_total[5m]), "pod", "$1", "pod_name", "(.*)"))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceNET_OUT_BIT_RATEsum by (namespace)((max by (namespace, pod) ((rate(container_network_transmit_bytes_total[5m])))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceNET_IN_ERROR_RATEsum by (namespace)((max by (namespace, pod) (label_replace(rate(container_network_receive_errors_total[5m]), "pod", "$1", "pod_name", "(.*)"))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceNET_IN_ERROR_RATEsum by (namespace)((max by (namespace, pod) ((rate(container_network_receive_errors_total[5m])))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceKPOD_NUMcount(kube_pod_status_phase{phase="Running" }==1) by (namespace)kube-state-metric
NamespaceMEM_ACTIVE(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_ACTIVE(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_ACTIVEsum(max by (container, namespace, pod)((avg_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceMEM_ACTIVEsum(max by (container, namespace, pod)((max_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceMEM_ACTIVE(sum by ( namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_ACTIVE(sum by ( namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_ACTIVE(sum by ( namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_ACTIVE(sum by ( namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCONTAINER_NUMcount(kube_pod_container_info * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kube-state-metric
NamespaceCPU_REQUESTsum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceCPU_REQUESTsum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceCPU_REQUESTsum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceCPU_REQUESTsum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceNET_OUT_BYTE_RATEsum by (namespace)((max by (namespace, pod) (label_replace(rate(container_network_transmit_bytes_total[5m]), "pod", "$1", "pod_name", "(.*)"))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceNET_OUT_BYTE_RATEsum by (namespace)((max by (namespace, pod) ((rate(container_network_transmit_bytes_total[5m])))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container) (label_replace(label_replace(rate(container_cpu_usage_seconds_total[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container) (label_replace(label_replace(max_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container) (label_replace(label_replace(min_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCPU_USED_NUMsum by (namespace) ( sum by (namespace, pod, container) ((rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD"}[5m])))*on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceCPU_USED_NUMsum by (namespace) ( sum by (namespace, pod, container) (max_over_time((rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD"}[2m]))[5m:]))*on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceCPU_USED_NUMsum by (namespace) ( sum by (namespace, pod, container) (min_over_time((rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD"}[2m]))[5m:]))*on (namespace, pod) group_left() max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container)(label_replace(rate(container_cpu_usage_seconds_total[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container)(label_replace(max_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container)(label_replace(min_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container)(label_replace(rate(container_cpu_usage_seconds_total[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container)(label_replace(max_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceCPU_USED_NUM(sum by (namespace)(sum by (pod, namespace, container)(label_replace(min_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(label_replace(min_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USEDsum(max by (container, namespace, pod)((avg_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceMEM_REAL_USEDsum(max by (container, namespace, pod)((max_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceMEM_REAL_USEDsum(max by (container, namespace, pod)((min_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(min_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceMEM_REAL_USED(sum by ( namespace)(max by (pod, namespace, container)(label_replace(min_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1)) )kubelet
NamespaceTOTAL_FS_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceTOTAL_FS_USEDsum ( max by (namespace, pod, container)((avg_over_time(container_fs_usage_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceTOTAL_FS_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceTOTAL_FS_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_REQUESTsum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceMEM_REQUESTsum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceMEM_REQUESTsum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceMEM_REQUESTsum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceNET_IN_BYTE_RATEsum by (namespace)((max by (namespace, pod) (label_replace(rate(container_network_receive_bytes_total[5m]), "pod", "$1", "pod_name", "(.*)"))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceNET_IN_BYTE_RATEsum by (namespace)((max by (namespace, pod) ((rate(container_network_receive_bytes_total[5m])))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceBYSTATUS_KPOD_NUMcount(kube_pod_status_phase==1) by (phase, namespace)kube-state-metric
NamespaceCPU_UTILsum by (namespace)(max by (pod, namespace, container)(label_replace(label_replace(rate(container_cpu_usage_seconds_total[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) )/ sum by (namespace)(count by (namespace, pod, container)(label_replace(label_replace(container_cpu_usage_seconds_total, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)"))* on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) )kubelet
NamespaceCPU_UTILsum by (namespace) ( max by (namespace, pod, container) ((rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) / sum by (namespace)(count by (namespace, pod, container)((container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }))* on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1))kubelet
NamespaceCPU_UTILsum by (namespace)(max by (pod, namespace, container)(label_replace(rate(container_cpu_usage_seconds_total[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) )/ sum by (namespace)(count by (namespace, pod, container)(label_replace(container_cpu_usage_seconds_total, "pod", "$1", "pod_name", "(.*)"))* on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) )kubelet
NamespaceCPU_UTILsum by (namespace)(max by (pod, namespace, container)(label_replace(rate(container_cpu_usage_seconds_total[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) )/ sum by (namespace)(count by (namespace, pod, container)(label_replace(container_cpu_usage_seconds_total, "container", "$1", "container_name", "(.*)"))* on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) )kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(label_replace(max_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(label_replace(min_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_USEDsum ( max by (namespace, pod, container)((avg_over_time(container_memory_usage_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceMEM_USEDsum ( max by (namespace, pod, container)((max_over_time(container_memory_usage_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceMEM_USEDsum ( max by (namespace, pod, container)((min_over_time(container_memory_usage_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)) by (namespace)kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(min_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(max_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceMEM_USED(sum by (namespace)(max by (pod, namespace, container)(label_replace(min_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) ))kubelet
NamespaceNET_OUT_ERROR_RATEsum by (namespace)((max by (namespace, pod) (label_replace(rate(container_network_transmit_errors_total[5m]), "pod", "$1", "pod_name", "(.*)"))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceNET_OUT_ERROR_RATEsum by (namespace)((max by (namespace, pod) ((rate(container_network_transmit_errors_total[5m])))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceMEM_KLIMITsum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceMEM_KLIMITsum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceMEM_KLIMITsum by (namespace)(max by (pod, namespace, node, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceMEM_KLIMITsum by (namespace)(max by (pod, namespace, node, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace) group_left() max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))kube-state-metric
NamespaceNET_BIT_RATEavg by (namespace)(( max by (namespace, pod) (label_replace(rate(container_network_transmit_bytes_total[5m]), "pod", "$1", "pod_name", "(.*)")+label_replace(rate(container_network_receive_bytes_total[5m]), "pod", "$1", "pod_name", "(.*)"))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceNET_BIT_RATEavg by (namespace)(( max by (namespace, pod) ((rate(container_network_transmit_bytes_total[5m]))+(rate(container_network_receive_bytes_total[5m])))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceST_ALLOCATEDsum by (namespace) (avg_over_time(kube_persistentvolumeclaim_resource_requests_storage_bytes[5m])) kube-state-metric
NamespaceNET_IN_BIT_RATEsum by (namespace)((max by (namespace, pod) (label_replace(rate(container_network_receive_bytes_total[5m]), "pod", "$1", "pod_name", "(.*)"))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
NamespaceNET_IN_BIT_RATEsum by (namespace)((max by (namespace, pod) ((rate(container_network_receive_bytes_total[5m])))) * on (pod, namespace) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1)))kubelet
ControllerCREATION_TIMEsum by (namespace, owner_kind, owner_name)(label_replace(kube_daemonset_created, "owner_name","$1", "daemonset", "(.*)") * on (owner_name, namespace) group_left(owner_kind) sum by (namespace, owner_name, owner_kind) ( kube_pod_owner{owner_kind=~"[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]" }) )or (sum by (namespace, owner_kind, owner_name)(label_replace(kube_statefulset_created, "owner_name","$1", "statefulset", "(.*)") * on (owner_name, namespace) group_left(owner_kind) sum by (namespace, owner_name, owner_kind) ( kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]" }) ) )or (sum by (namespace, owner_kind, owner_name)(label_replace(kube_replicaset_created, "owner_name","$1", "replicaset", "(.*)") * on (owner_name, namespace) group_left(owner_kind) sum by (namespace, owner_name, owner_kind) ( kube_pod_owner{owner_kind=~"[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]" }) ) )or (sum by (namespace, owner_kind, owner_name)(label_replace(kube_replicationcontroller_created, "owner_name","$1", "replicationcontroller", "(.*)") * on (owner_name, namespace) group_left(owner_kind) sum by (namespace, owner_name, owner_kind) ( kube_pod_owner{owner_kind=~"[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }) ) )kube-state-metric
ControllerCONTROLLER_TYPEcount(kube_pod_owner{owner_kind!="<none>", owner_kind!~"[Jj][Oo][Bb]" }) by (owner_name, owner_kind, namespace)kube-state-metric
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum(max by (container, pod, namespace)((avg_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum(max by (container, pod, namespace)((max_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum(max by (container, pod, namespace)((avg_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum(max by (container, pod, namespace)((max_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerMEM_REAL_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kubelet
ControllerBYIMAGE_CPU_REQUESTsum by (namespace, owner_name, owner_kind, image)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (container, namespace, pod, image) group_right() ((sum by (pod, namespace, image, container) (kube_pod_container_info)) * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerCPU_LIMITsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerCPU_LIMITsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerCPU_LIMITsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerCPU_LIMITsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerNET_OUT_BIT_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod) (label_replace(rate(container_network_transmit_bytes_total[5m]),"pod", "$1", "pod_name", "(.*)") )  *on ( namespace, pod) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_OUT_BIT_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod, container) ((rate(container_network_transmit_bytes_total[5m])) )  *on (namespace, pod) group_left() (max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_IN_ERROR_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod) (label_replace(rate(container_network_receive_errors_total[5m]),"pod", "$1", "pod_name", "(.*)") )  *on ( namespace, pod) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_IN_ERROR_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod, container) ((rate(container_network_receive_errors_total[5m])) )  *on (namespace, pod) group_left() (max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerKPOD_NUMcount((kube_pod_status_phase{phase="Running" } * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]"})))==1)  by (owner_kind, owner_name, namespace)kube-state-metric
ControllerMEM_ACTIVEsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_ACTIVEsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_ACTIVEsum(max by (container, pod, namespace)((avg_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)kubelet
ControllerMEM_ACTIVEsum(max by (container, pod, namespace)((max_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)kubelet
ControllerMEM_ACTIVEsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_ACTIVEsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_ACTIVEsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_ACTIVEsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCONTAINER_NUMcount(kube_pod_container_info * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (namespace, owner_name, owner_kind)kube-state-metric
ControllerCPU_REQUESTsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerCPU_REQUESTsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerCPU_REQUESTsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerCPU_REQUESTsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerNET_OUT_BYTE_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod) (label_replace(rate(container_network_transmit_bytes_total[5m]),"pod", "$1", "pod_name", "(.*)") )  *on ( namespace, pod) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_OUT_BYTE_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod, container) ((rate(container_network_transmit_bytes_total[5m])) )  *on (namespace, pod) group_left() (max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(rate(container_cpu_usage_seconds_total[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(max_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(min_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_USED_NUMsum by ( namespace, owner_name, owner_kind) (sum by (namespace, pod, container) ((rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }[5m])) )  *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerCPU_USED_NUMsum by ( namespace, owner_name, owner_kind) (sum by (namespace, pod, container) (max_over_time((rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }[2m]))[5m:]))  *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerCPU_USED_NUMsum by ( namespace, owner_name, owner_kind) (sum by (namespace, pod, container) (min_over_time((rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }[2m]))[5m:]))  *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(rate(container_cpu_usage_seconds_total[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(max_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(min_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(rate(container_cpu_usage_seconds_total[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(max_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_USED_NUMsum by (namespace, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(min_over_time((rate(container_cpu_usage_seconds_total[2m]))[5m:]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(min_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum(max by (container, pod, namespace)((avg_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)kubelet
ControllerMEM_REAL_USEDsum(max by (container, pod, namespace)((max_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)kubelet
ControllerMEM_REAL_USEDsum(max by (container, pod, namespace)((min_over_time(container_memory_working_set_bytes{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (owner_name, namespace, owner_kind)kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(min_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REAL_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(min_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerTOTAL_FS_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerTOTAL_FS_USEDsum by ( namespace, owner_name, owner_kind)(max by (pod, namespace, container)((avg_over_time(container_fs_usage_bytes{image!="", container!="", container!="POD" }[5m])))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerTOTAL_FS_USEDsum by (namespace,  owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerTOTAL_FS_USEDsum by (namespace,  owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_REQUESTsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerMEM_REQUESTsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerMEM_REQUESTsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerMEM_REQUESTsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerBYSTATUS_KPOD_NUMcount((kube_pod_status_phase * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]"})))==1)  by (owner_kind, owner_name, namespace, phase)kube-state-metric
ControllerNET_IN_BYTE_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod) (label_replace(rate(container_network_receive_bytes_total[5m]),"pod", "$1", "pod_name", "(.*)") )  *on ( namespace, pod) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_IN_BYTE_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod, container) ((rate(container_network_receive_bytes_total[5m])) )  *on (namespace, pod) group_left() (max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerBYIMAGE_MEM_REQUESTsum by (namespace, owner_name, owner_kind,image)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, image, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) )kube-state-metric
ControllerCPU_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container)(label_replace(label_replace(rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)"))/(count by (pod, namespace, container)(label_replace(label_replace(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)"))) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_UTILsum by (namespace,owner_kind, owner_name)(max by (namespace, pod, container)((rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }[5m]))) *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })) ) / (max by (namespace, owner_name, owner_kind)(count by (owner_kind, namespace, owner_name)((container_cpu_usage_seconds_total{image!="", container!="", container!="POD" } )  *on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))))kubelet
ControllerCPU_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container)(label_replace(rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }[5m]), "pod", "$1", "pod_name", "(.*)"))/(count by (pod, namespace, container)(label_replace(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }, "pod", "$1", "pod_name", "(.*)"))) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerCPU_UTILsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container)(label_replace(rate(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }[5m]),  "container", "$1", "container_name", "(.*)"))/(count by (pod, namespace, container)(label_replace(container_cpu_usage_seconds_total{image!="", container!="", container!="POD" }, "container", "$1", "container_name", "(.*)"))) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(max_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (pod, namespace, container, owner_kind, owner_name)(label_replace(label_replace(min_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by ( namespace, owner_name, owner_kind)(max by (pod, namespace, container)((avg_over_time(container_memory_usage_bytes{image!="", container!="", container!="POD" }[5m])))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerMEM_USEDsum by ( namespace, owner_name, owner_kind)(max by (pod, namespace, container)((max_over_time(container_memory_usage_bytes{image!="", container!="", container!="POD" }[5m])))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerMEM_USEDsum by ( namespace, owner_name, owner_kind)(max by (pod, namespace, container)((min_over_time(container_memory_usage_bytes{image!="", container!="", container!="POD" }[5m])))*on (namespace, pod) group_left() max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(max_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(min_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(max_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerMEM_USEDsum by (namespace,  owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(min_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
ControllerNET_OUT_ERROR_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod) (label_replace(rate(container_network_transmit_errors_total[5m]),"pod", "$1", "pod_name", "(.*)") )  *on ( namespace, pod) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_OUT_ERROR_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod, container) ((rate(container_network_transmit_errors_total[5m])) )  *on (namespace, pod) group_left() (max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerBYIMAGE_NUMcount(kube_pod_container_info * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))) by (namespace, owner_name, image, owner_kind)kube-state-metric
ControllerMEM_KLIMITsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerMEM_KLIMITsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerMEM_KLIMITsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerMEM_KLIMITsum by (namespace, owner_name, owner_kind)(max by (pod, namespace, container)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (container, namespace, pod) group_right() ((sum by (pod, namespace, container) (kube_pod_container_info))  * on (namespace, pod) group_left () max by(namespace, pod) (kube_pod_status_phase{phase="Running" }==1)  * on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" }))))kube-state-metric
ControllerNET_BIT_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod) (label_replace(rate(container_network_receive_bytes_total[5m]),"pod", "$1", "pod_name", "(.*)")+label_replace(rate(container_network_transmit_bytes_total[5m]),"pod", "$1", "pod_name", "(.*)") )  *on ( namespace, pod) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_BIT_RATEavg by ( namespace, owner_name, owner_kind) (max by (namespace, pod, container) ((rate(container_network_receive_bytes_total[5m])+rate(container_network_transmit_bytes_total[5m])) )  *on (namespace, pod) group_left() (max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_IN_BIT_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod) (label_replace(rate(container_network_receive_bytes_total[5m]),"pod", "$1", "pod_name", "(.*)") )  *on ( namespace, pod) group_left() (max by (pod, namespace) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
ControllerNET_IN_BIT_RATEsum by ( namespace, owner_name, owner_kind) (max by (namespace, pod, container) ((rate(container_network_receive_bytes_total[5m])) )  *on (namespace, pod) group_left() (max by (namespace, pod) (kube_pod_status_phase{phase="Running" }==1))* on (namespace, pod) group_left(owner_name, owner_kind) (sum by (namespace, pod, owner_kind, owner_name) (kube_pod_owner{owner_kind=~"[Ss][Tt][Aa][Tt][Ee][Ff][Uu][Ll][Ss][Ee][Tt]|[Dd][Aa][Ee][Mm][Oo][Nn][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Ss][Ee][Tt]|[Rr][Ee][Pp][Ll][Ii][Cc][Aa][Tt][Ii][Oo][Nn][Cc][Oo][Nn][Tt][Rr][Oo][Ll][Ll][Ee][Rr]" })))kubelet
Persistent Volume | ST_TYPE | sum by (storageclass, persistentvolume)(kube_persistentvolume_info) | kube-state-metric
Persistent Volume | ST_ALLOCATED | sum by (persistentvolume) (avg_over_time(kube_persistentvolumeclaim_resource_requests_storage_bytes[5m]) * on (persistentvolumeclaim, namespace) group_left(persistentvolume) max by(namespace, persistentvolume, persistentvolumeclaim) (label_replace(kube_persistentvolumeclaim_info{volumename!="" }, "persistentvolume", "$1", "volumename", "(.*)"))) | kube-state-metric
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))) / sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))) / sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(avg_over_time(container_memory_working_set_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(max_over_time(container_memory_working_set_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))) / sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(avg_over_time(container_memory_working_set_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(max_over_time(container_memory_working_set_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))) / sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REAL_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))/ sum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadHEAPMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_used{area="heap" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_memory_bytes_used{area="heap" }[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_used{area="heap" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadGC_TIMEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_gc_collection_seconds_sum[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadGC_TIMEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_gc_collection_seconds_sum[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadGC_TIMEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_gc_collection_seconds_sum[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadCPU_LIMITsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (namespace, pod, container, node, image)(avg_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadCPU_LIMITsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (namespace, pod, container, node, image)(max_over_time(kube_pod_container_resource_limits{resource="cpu" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadCPU_LIMITsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadCPU_LIMITsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits_cpu_cores[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadNET_HAPROXY_BYTES_INsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(rate(haproxy_frontend_bytes_in_total[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))router-internal-default
Pod Workload | KPOD_NUM | sum by (pod, namespace, container, image, owner_kind, owner_name)( (max by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))) | kube-state-metric
Pod Workload | CONTAINER_NUM | count by (pod, namespace, container, image, owner_kind, owner_name)(kube_pod_container_info * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))) | kube-state-metric
Pod WorkloadNET_HAPROXY_BYTES_OUTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(rate(haproxy_frontend_bytes_out_total[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))router-internal-default
Pod WorkloadMEM_USED_HMsum by (pod, namespace, container, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_USED_HMsum by (pod, namespace, container, owner_kind, owner_name)(((max by (namespace, pod, container)(avg_over_time(container_memory_usage_bytes[5m])))) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_USED_HMsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_USED_HMsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (namespace, pod, container)(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadCPU_REQUESTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (namespace, pod, container, node, image)(avg_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadCPU_REQUESTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (namespace, pod, container, node, image)(max_over_time(kube_pod_container_resource_requests{resource="cpu" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadCPU_REQUESTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadCPU_REQUESTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_requests_cpu_cores[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadRESTART_COUNTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(round(delta(kube_pod_container_status_restarts_total[5m]))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadNONHEAPMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_used{area="nonheap" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadNONHEAPMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_memory_bytes_used{area="nonheap" }[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadNONHEAPMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_used{area="nonheap" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_used{area="heap" }[5m])/avg_over_time(jvm_memory_bytes_max{area="heap" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_memory_bytes_used{area="heap" }[5m])/avg_over_time(jvm_memory_bytes_max{area="heap" }[5m]),"namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_UTILsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_used{area="heap" }[5m])/avg_over_time(jvm_memory_bytes_max{area="heap" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_MAXsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_max{area="heap" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_MAXsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_memory_bytes_max{area="heap" }[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)"))* on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_MAXsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_max{area="heap" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadTOTAL_FS_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadTOTAL_FS_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(avg_over_time(container_fs_usage_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadTOTAL_FS_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))))kubelet
Pod WorkloadTOTAL_FS_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_fs_usage_bytes[5m]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))))kubelet
Pod WorkloadHEAPMEM_COMMITTEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_committed{area="heap" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_COMMITTEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_memory_bytes_committed{area="heap" }[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_COMMITTEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_committed{area="heap" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_FREEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_max{area="heap" }[5m])-avg_over_time(jvm_memory_bytes_used{area="heap" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_FREEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_memory_bytes_max{area="heap" }[5m])-avg_over_time(jvm_memory_bytes_used{area="heap" }[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadHEAPMEM_FREEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_max{area="heap" }[5m])-avg_over_time(jvm_memory_bytes_used{area="heap" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadMEM_RSSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(avg_over_time(container_memory_rss[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_RSSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(max_over_time(container_memory_rss[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_RSSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_rss[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_RSSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_rss[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_RSSsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(avg_over_time(container_memory_rss[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_RSSsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(max_over_time(container_memory_rss[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_RSSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_rss[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_RSSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_rss[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadCPU_USED_NUM_HMsum by (pod, namespace, container, owner_kind, owner_name)(sum by (pod, namespace, container, image)(label_replace(label_replace(rate(container_cpu_usage_seconds_total{container!="POD" }[5m]),"pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadCPU_USED_NUM_HMsum by (pod, namespace, container, owner_kind, owner_name)(((sum by (namespace, pod, container)(rate(container_cpu_usage_seconds_total{container!="POD" }[5m])))) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadCPU_USED_NUM_HMsum by (pod, namespace, container, image, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(rate(container_cpu_usage_seconds_total{container!="POD" }[5m]),"pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadCPU_USED_NUM_HMsum by (pod, namespace, container, image, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(rate(container_cpu_usage_seconds_total{container!="POD"}[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(max_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(avg_over_time(container_memory_usage_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(max_over_time(container_memory_usage_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))))kubelet
Pod WorkloadMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_usage_bytes[5m]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))))kubelet
Pod WorkloadMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))))kubelet
Pod WorkloadMEM_USEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_usage_bytes[5m]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner))))kubelet
Pod WorkloadNET_HAPROXY_CONNECTIONSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(rate(haproxy_frontend_connections_total[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))router-internal-default
Pod WorkloadTHREAD_COUNTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_threads_current[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadTHREAD_COUNTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_threads_current[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadTHREAD_COUNTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_threads_current[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadMEM_ACTIVEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_ACTIVEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_ACTIVEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_ACTIVEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_ACTIVEsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(avg_over_time(container_memory_working_set_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_ACTIVEsum by (pod, namespace, container, image, owner_kind, owner_name)(((max by (namespace, pod, container)(max_over_time(container_memory_working_set_bytes[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_ACTIVEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(avg_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_ACTIVEsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image, owner_kind, owner_name)(label_replace(max_over_time(container_memory_working_set_bytes[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadNONHEAPMEM_COMMITTEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_committed{area="nonheap" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadNONHEAPMEM_COMMITTEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_memory_bytes_committed{area="nonheap" }[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)"))* on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadNONHEAPMEM_COMMITTEDsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_committed{area="nonheap" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadNONHEAPMEM_MAXsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_max{area="nonheap" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadNONHEAPMEM_MAXsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_memory_bytes_max{area="nonheap" }[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadNONHEAPMEM_MAXsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_memory_bytes_max{area="nonheap" }[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadGC_EVENTSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_gc_collection_seconds_count[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadGC_EVENTSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(label_replace(label_replace(label_replace(avg_over_time(jvm_gc_collection_seconds_count[5m]), "namespace", "$1", "kubernetes_namespace", "(.*)"), "pod", "$1", "kubernetes_pod_name", "(.*)"), "container", "$1", "app", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadGC_EVENTSsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(jvm_gc_collection_seconds_count[5m])) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))jmx-exporter
Pod WorkloadCPU_USED_NUMsum by (pod, namespace, container, image, owner_kind, owner_name)(sum by (pod, namespace, container, image)(label_replace(label_replace(rate(container_cpu_usage_seconds_total{container!="POD" }[5m]),"pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadCPU_USED_NUMsum by (pod, namespace, container, image, owner_kind, owner_name)(((sum by (namespace, pod, container)(rate(container_cpu_usage_seconds_total{container!="POD" }[5m])))) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadCPU_USED_NUMsum by (pod, namespace, container, image, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(rate(container_cpu_usage_seconds_total{container!="POD"}[5m]),"pod", "$1", "pod_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadCPU_USED_NUMsum by (pod, namespace, container, image, owner_kind, owner_name)(sum by (namespace, pod, container)(label_replace(rate(container_cpu_usage_seconds_total{container!="POD"}[5m]), "container", "$1", "container_name", "(.*)")) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kubelet
Pod WorkloadMEM_REQUESTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadMEM_REQUESTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_requests{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadMEM_REQUESTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadMEM_REQUESTsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_requests_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadMEM_KLIMITsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadMEM_KLIMITsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits{resource="memory" }[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadMEM_KLIMITsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(avg_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
Pod WorkloadMEM_KLIMITsum by (pod, namespace, container, image, owner_kind, owner_name)(max by (pod, namespace, container, image)(max_over_time(kube_pod_container_resource_limits_memory_bytes[5m])) * on (pod, namespace, container) group_left(image) (sum by (pod, namespace, container, image)(kube_pod_container_info)) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running" }==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner)))kube-state-metric
ControllerBYCONT_MEM_USED_HM_AVGquantile_over_time(0.90, avg by (namespace, container, owner_kind, owner_name)(container_memory_usage_bytes{container!="", container_name!=""} * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_AVGquantile_over_time(0.90, avg by (namespace, container, owner_kind, owner_name)(label_replace(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_AVGquantile_over_time(0.90, avg by (namespace, container, owner_kind, owner_name)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_AVGquantile_over_time(0.90, avg by (namespace, container, owner_kind, owner_name)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_MINquantile_over_time(0.75, min by (namespace, container, owner_kind, owner_name)(container_memory_usage_bytes{container!="", container_name!=""} * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_MINquantile_over_time(0.75, min by (namespace, container, owner_kind, owner_name)(label_replace(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_MINquantile_over_time(0.75, min by (namespace, container, owner_kind, owner_name)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_MINquantile_over_time(0.75, min by (namespace, container, owner_kind, owner_name)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_CPU_USED_NUM_HM_MAXavg by (namespace, container, owner_kind, owner_name)(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_MAXavg by (namespace, container, owner_kind, owner_name)(label_replace(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_MAXavg by (namespace, container, owner_kind, owner_name)(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_MAXavg by (namespace, container, owner_kind, owner_name)(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_MEM_USED_HM_MAXquantile_over_time(0.95, max by (namespace, container, owner_kind, owner_name)(container_memory_usage_bytes{container!="", container_name!=""} * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_MAXquantile_over_time(0.95, max by (namespace, container, owner_kind, owner_name)(label_replace(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_MAXquantile_over_time(0.95, max by (namespace, container, owner_kind, owner_name)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_MAXquantile_over_time(0.95, max by (namespace, container, owner_kind, owner_name)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_CPU_USED_NUM_HM_SUMavg by (namespace, container, owner_kind, owner_name)(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_SUMavg by (namespace, container, owner_kind, owner_name)(label_replace(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_SUMavg by (namespace, container, owner_kind, owner_name)(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_SUMavg by (namespace, container, owner_kind, owner_name)(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_MEM_USED_HM_SUMquantile_over_time(0.95, sum by (namespace, container, owner_kind, owner_name)(container_memory_usage_bytes{container!="", container_name!=""} * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_SUMquantile_over_time(0.95, sum by (namespace, container, owner_kind, owner_name)(label_replace(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_SUMquantile_over_time(0.95, sum by (namespace, container, owner_kind, owner_name)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_MEM_USED_HM_SUMquantile_over_time(0.95, sum by (namespace, container, owner_kind, owner_name)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
ControllerBYCONT_CPU_USED_NUM_HM_AVGavg by (namespace, container, owner_kind, owner_name)(quantile_over_time(0.90, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_AVGavg by (namespace, container, owner_kind, owner_name)(label_replace(label_replace(quantile_over_time(0.90, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_AVGavg by (namespace, container, owner_kind, owner_name)(label_replace(quantile_over_time(0.90, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_AVGavg by (namespace, container, owner_kind, owner_name)(label_replace(quantile_over_time(0.90, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_MINavg by (namespace, container, owner_kind, owner_name)(quantile_over_time(0.75, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_MINavg by (namespace, container, owner_kind, owner_name)(label_replace(label_replace(quantile_over_time(0.75, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_MINavg by (namespace, container, owner_kind, owner_name)(label_replace(quantile_over_time(0.75, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
ControllerBYCONT_CPU_USED_NUM_HM_MINavg by (namespace, container, owner_kind, owner_name)(label_replace(quantile_over_time(0.75, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_MEM_USED_HM_AVGquantile_over_time(0.90, avg by (namespace, container, pod)(container_memory_usage_bytes{container!="", container_name!=""} * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_AVGquantile_over_time(0.90, avg by (namespace, container, pod)(label_replace(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_AVGquantile_over_time(0.90, avg by (namespace, container, pod)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_AVGquantile_over_time(0.90, avg by (namespace, container, pod)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_MINquantile_over_time(0.75, min by (namespace, container, pod)(container_memory_usage_bytes{container!="", container_name!=""} * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_MINquantile_over_time(0.75, min by (namespace, container, pod)(label_replace(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_MINquantile_over_time(0.75, min by (namespace, container, pod)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_MINquantile_over_time(0.75, min by (namespace, container, pod)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_MAXavg by (namespace, container, pod)(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_MAXavg by (namespace, container, pod)(label_replace(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_MAXavg by (namespace, container, pod)(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_MAXavg by (namespace, container, pod)(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_MEM_USED_HM_MAXquantile_over_time(0.95, max by (namespace, container, pod)(container_memory_usage_bytes{container!="", container_name!=""} * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_MAXquantile_over_time(0.95, max by (namespace, container, pod)(label_replace(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_MAXquantile_over_time(0.95, max by (namespace, container, pod)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_MAXquantile_over_time(0.95, max by (namespace, container, pod)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_SUMavg by (namespace, container, pod)(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_SUMavg by (namespace, container, pod)(label_replace(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_SUMavg by (namespace, container, pod)(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_SUMavg by (namespace, container, pod)(label_replace(quantile_over_time(0.95, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_MEM_USED_HM_SUMquantile_over_time(0.95, sum by (namespace, container, pod)(container_memory_usage_bytes{container!="", container_name!=""} * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_SUMquantile_over_time(0.95, sum by (namespace, container, pod)(label_replace(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_SUMquantile_over_time(0.95, sum by (namespace, container, pod)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_MEM_USED_HM_SUMquantile_over_time(0.95, sum by (namespace, container, pod)(label_replace(container_memory_usage_bytes{container!="", container_name!=""}, "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))[5m:1m])kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_AVGavg by (namespace, container, pod)(quantile_over_time(0.90, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_AVGavg by (namespace, container, pod)(label_replace(label_replace(quantile_over_time(0.90, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_AVGavg by (namespace, container, pod)(label_replace(quantile_over_time(0.90, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_AVGavg by (namespace, container, pod)(label_replace(quantile_over_time(0.90, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_MINavg by (namespace, container, pod)(quantile_over_time(0.75, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]) * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_MINavg by (namespace, container, pod)(label_replace(label_replace(quantile_over_time(0.75, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)"), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_MINavg by (namespace, container, pod)(label_replace(quantile_over_time(0.75, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "pod", "$1", "pod_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet
Pod WorkloadBYCONT_CPU_USED_NUM_HM_MINavg by (namespace, container, pod)(label_replace(quantile_over_time(0.75, rate(container_cpu_usage_seconds_total{container!="", container_name!=""}[10m])[5m:]), "container", "$1", "container_name", "(.*)") * on (pod, namespace, container) group_left() (sum by (pod, namespace, container)(kube_pod_container_info{}) * on (pod, namespace) group_left() max by(pod, namespace) (kube_pod_status_phase{phase="Running"}==1) * on (namespace, pod) group_left(owner_kind, owner_name) (sum by (namespace, pod, owner_kind, owner_name)(kube_pod_owner{owner_kind=~"Deployment|ReplicaSet|StatefulSet|DaemonSet"}))))kubelet

 

 

 

Tip: For faster searching, add an asterisk to the end of your partial query. Example: cert*

BMC Helix Continuous Optimization 26.1