daemonset_test.go

package kube_inventory

import (
	"testing"
	"time"

	"github.com/ericchiang/k8s/apis/apps/v1beta2"
	metav1 "github.com/ericchiang/k8s/apis/meta/v1"

	"github.com/influxdata/telegraf/testutil"
)
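
// TestDaemonSet feeds a mock /daemonsets/ API response through
// gatherDaemonSet and checks the fields and tags of the collected metrics.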
func TestDaemonSet(t *testing.T) {
	cli := &client{}
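
	// Zero out the sub-second part of now: the creation timestamp round-trips
	// through the API type with second precision, so the expected "created"
	// value must carry no nanosecond component.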
	now := time.Now()
	now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())

	tests := []struct {
		name     string
		handler  *mockHandler
		output   *testutil.Accumulator
		hasError bool
	}{
		{
			name: "no daemon set",
			handler: &mockHandler{
				responseMap: map[string]interface{}{
					"/daemonsets/": &v1beta2.DaemonSetList{},
				},
			},
			hasError: false,
		},
		{
			name: "collect daemonsets",
			handler: &mockHandler{
				responseMap: map[string]interface{}{
					"/daemonsets/": &v1beta2.DaemonSetList{
						Items: []*v1beta2.DaemonSet{
							{
								Status: &v1beta2.DaemonSetStatus{
									CurrentNumberScheduled: toInt32Ptr(3),
									DesiredNumberScheduled: toInt32Ptr(5),
									NumberAvailable:        toInt32Ptr(2),
									NumberMisscheduled:     toInt32Ptr(2),
									NumberReady:            toInt32Ptr(1),
									NumberUnavailable:      toInt32Ptr(1),
									UpdatedNumberScheduled: toInt32Ptr(2),
								},
								Metadata: &metav1.ObjectMeta{
									Generation: toInt64Ptr(11221),
									Namespace:  toStrPtr("ns1"),
									Name:       toStrPtr("daemon1"),
									Labels: map[string]string{
										"lab1": "v1",
										"lab2": "v2",
									},
									CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
								},
							},
						},
					},
				},
			},
			output: &testutil.Accumulator{
				Metrics: []*testutil.Metric{
					{
						Fields: map[string]interface{}{
							"generation":               int64(11221),
							"current_number_scheduled": int32(3),
							"desired_number_scheduled": int32(5),
							"number_available":         int32(2),
							"number_misscheduled":      int32(2),
							"number_ready":             int32(1),
							"number_unavailable":       int32(1),
							"updated_number_scheduled": int32(2),
							"created":                  now.UnixNano(),
						},
						Tags: map[string]string{
							"daemonset_name": "daemon1",
							"namespace":      "ns1",
						},
					},
				},
			},
			hasError: false,
		},
	}

	for _, v := range tests {
		ks := &KubernetesInventory{
			client: cli,
		}
		acc := new(testutil.Accumulator)
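		// Feed each DaemonSet in the mock response through the gatherer.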
		for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1beta2.DaemonSetList)).Items {
			err := ks.gatherDaemonSet(*dset, acc)
			if err != nil {
				t.Errorf("Failed to gather daemonset - %s", err.Error())
			}
		}
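
		// Cross-check any accumulated error against the case's hasError flag,
		// then compare the collected metrics with the expected output.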
		err := acc.FirstError()
		if err == nil && v.hasError {
			t.Fatalf("%s failed, should have error", v.name)
		} else if err != nil && !v.hasError {
			t.Fatalf("%s failed, err: %v", v.name, err)
		}

		if v.output == nil && len(acc.Metrics) > 0 {
			t.Fatalf("%s: collected extra data", v.name)
		} else if v.output != nil && len(v.output.Metrics) > 0 {
			for i := range v.output.Metrics {
				for k, m := range v.output.Metrics[i].Tags {
					if acc.Metrics[i].Tags[k] != m {
						t.Fatalf("%s: tag %s mismatch, expected %s, got %s", v.name, k, m, acc.Metrics[i].Tags[k])
					}
				}
				for k, m := range v.output.Metrics[i].Fields {
					if acc.Metrics[i].Fields[k] != m {
						t.Fatalf("%s: field %s mismatch, expected %v (%T), got %v (%T)", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k])
					}
				}
			}
		}
	}
}