From bf2e8db81f59416efcfdd1569dd042878fa04048 Mon Sep 17 00:00:00 2001 From: ahakanbaba Date: Thu, 13 Oct 2016 02:05:50 -0700 Subject: [PATCH] Added metrics describing the cpu and mem requests per container. (#35) These metrics are retrieved using the v1 api directly. No additional accumulation or computation is done. The state is maintained per container in a pod in kubernetes. The metrics are also tagged by the node where the containers are residing. Our use case for these metrics is as follows: It is useful to accumulate these per node and understand how much remaining un-requested resources are available in each node. This would give an indication of the "utilization" of the cluster. --- README.md | 2 ++ pod.go | 28 ++++++++++++++++++ pod_test.go | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+) diff --git a/README.md b/README.md index 9ffe673978..3e90d0648b 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,8 @@ additional metrics! | kube_pod_container_status_terminated | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | | kube_pod_container_status_ready | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | | kube_pod_container_status_restarts | Counter | `container`=<container-name>
`namespace`=<pod-namespace>
`pod`=<pod-name> | +| kube_pod_container_requested_cpu_millicores | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-address> | +| kube_pod_container_requested_memory_bytes | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-address> | ## kube-state-metrics vs. Heapster diff --git a/pod.go index 23c91ab2c5..9b89cf5fe6 100644 --- a/pod.go +++ b/pod.go @@ -73,6 +73,18 @@ var ( "The number of container restarts per container.", []string{"namespace", "pod", "container"}, nil, ) + + descPodContainerRequestedCpuMilliCores = prometheus.NewDesc( + "kube_pod_container_requested_cpu_millicores", + "The number of requested cpu millicores by a container.", + []string{"namespace", "pod", "container", "node"}, nil, + ) + + descPodContainerRequestedMemoryBytes = prometheus.NewDesc( + "kube_pod_container_requested_memory_bytes", + "The number of requested memory bytes by a container.", + []string{"namespace", "pod", "container", "node"}, nil, + ) ) type podStore interface { @@ -97,6 +109,8 @@ func (pc *podCollector) Describe(ch chan<- *prometheus.Desc) { ch <- descPodContainerStatusTerminated ch <- descPodContainerStatusReady ch <- descPodContainerStatusRestarts + ch <- descPodContainerRequestedCpuMilliCores + ch <- descPodContainerRequestedMemoryBytes } // Collect implements the prometheus.Collector interface. 
@@ -145,4 +159,18 @@ func (pc *podCollector) collectPod(ch chan<- prometheus.Metric, p v1.Pod) { addGauge(descPodContainerStatusReady, boolFloat64(cs.Ready), cs.Name) addCounter(descPodContainerStatusRestarts, float64(cs.RestartCount), cs.Name) } + + nodeName := p.Spec.NodeName + for _, c := range p.Spec.Containers { + req := c.Resources.Requests + if cpu, ok := req[v1.ResourceCPU]; ok { + addGauge(descPodContainerRequestedCpuMilliCores, float64(cpu.MilliValue()), + c.Name, nodeName) + } + if mem, ok := req[v1.ResourceMemory]; ok { + addGauge(descPodContainerRequestedMemoryBytes, float64(mem.Value()), + c.Name, nodeName) + } + + } } diff --git a/pod_test.go b/pod_test.go index af33a105e1..d243782aa7 100644 --- a/pod_test.go +++ b/pod_test.go @@ -19,6 +19,7 @@ package main import ( "testing" + "k8s.io/client-go/1.4/pkg/api/resource" "k8s.io/client-go/1.4/pkg/api/v1" ) @@ -54,6 +55,10 @@ func TestPodCollector(t *testing.T) { # TYPE kube_pod_status_ready gauge # HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod. # TYPE kube_pod_status_scheduled gauge + # HELP kube_pod_container_requested_cpu_millicores The number of requested cpu millicores by a container. + # TYPE kube_pod_container_requested_cpu_millicores gauge + # HELP kube_pod_container_requested_memory_bytes The number of requested memory bytes by a container. 
+ # TYPE kube_pod_container_requested_memory_bytes gauge ` cases := []struct { pods []v1.Pod @@ -372,6 +377,84 @@ func TestPodCollector(t *testing.T) { kube_pod_status_scheduled{condition="unknown",namespace="ns2",pod="pod2"} 0 `, metrics: []string{"kube_pod_status_scheduled"}, + }, { + pods: []v1.Pod{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Spec: v1.PodSpec{ + NodeName: "node1", + Containers: []v1.Container{ + v1.Container{ + Name: "pod1_con1", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewMilliQuantity(int64(200), resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(100000000, resource.DecimalSI), + }, + }, + }, + v1.Container{ + Name: "pod1_con2", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewMilliQuantity(int64(300), resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(200000000, resource.DecimalSI), + }, + }, + }, + }, + }, + }, { + ObjectMeta: v1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Spec: v1.PodSpec{ + NodeName: "node2", + Containers: []v1.Container{ + v1.Container{ + Name: "pod2_con1", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewMilliQuantity(int64(400), resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(300000000, resource.DecimalSI), + }, + }, + }, + v1.Container{ + Name: "pod2_con2", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewMilliQuantity(int64(500), resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(400000000, resource.DecimalSI), + }, + }, + }, + // A container without a resource specification. No metrics will be emitted for that. 
+ v1.Container{ + Name: "pod2_con3", + }, + }, + }, + }, + }, + want: metadata + ` + kube_pod_container_requested_cpu_millicores{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 200 + kube_pod_container_requested_cpu_millicores{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 300 + kube_pod_container_requested_cpu_millicores{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 400 + kube_pod_container_requested_cpu_millicores{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 500 + kube_pod_container_requested_memory_bytes{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 1e+08 + kube_pod_container_requested_memory_bytes{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 2e+08 + kube_pod_container_requested_memory_bytes{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 3e+08 + kube_pod_container_requested_memory_bytes{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 4e+08 + `, + metrics: []string{ + "kube_pod_container_requested_cpu_millicores", + "kube_pod_container_requested_memory_bytes", + }, }, } for _, c := range cases {