metric: migrate all histograms to use prometheus-backed version
In a previous change, a new prometheus-backed histogram library was
introduced to help standardize histogram buckets across the codebase.
This change migrates all existing histograms to use the new library.

related: cockroachdb#85990

Release justification: low risk, high benefit
Release note (ops change): This change introduces a new histogram
implementation that will reduce the total number of buckets and
standardize them across all usage. This should help increase the
usability of histograms when exported to a UI (e.g., Grafana).
aadityasondhi committed Aug 26, 2022
1 parent f59f7df commit 9b4984b
Showing 32 changed files with 458 additions and 519 deletions.
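For reference, here is a minimal sketch of the constructor change repeated throughout the diffs below. The metaFooLatency metadata and makeFooMetrics helper are hypothetical, and the sketch assumes this repository's pkg/util/metric package and the named bucket sets referenced in the diffs (metric.IOLatencyBuckets, metric.NetworkLatencyBuckets, metric.BatchProcessLatencyBuckets, metric.DataSizeBuckets, and so on).

package metricmigration

import (
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/metric"
)

// metaFooLatency is a hypothetical metric definition, mirroring the
// metric.Metadata literals used in the diffs below.
var metaFooLatency = metric.Metadata{
	Name: "foo.latency",
	Unit: metric.Unit_NANOSECONDS,
}

func makeFooMetrics(histogramWindow time.Duration) {
	// Before this change, histograms were HDR-style and parameterized per
	// call site by a maximum value and significant figures, e.g.:
	//
	//   metric.NewHistogram(metaFooLatency, histogramWindow, time.Hour.Nanoseconds(), 1)
	//   metric.NewLatency(metaFooLatency, histogramWindow)
	//
	// After this change, histograms are prometheus-backed and take one of
	// the shared, named bucket sets instead, so bucket boundaries are
	// standardized across the codebase.
	h := metric.NewHistogram(metaFooLatency, histogramWindow, metric.IOLatencyBuckets)
	_ = h // in real code, register the histogram with a metric.Registry
}

Each call site simply picks the bucket set matching what it records (I/O latencies, network latencies, batch-processing latencies, data sizes, counts, percentages), which is what the hunks below do.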
2 changes: 1 addition & 1 deletion pkg/ccl/changefeedccl/changefeed_processors.go
@@ -817,7 +817,7 @@ func (j *jobState) checkpointCompleted(ctx context.Context, checkpointDuration t

j.metrics.CheckpointHistNanos.RecordValue(checkpointDuration.Nanoseconds())
j.lastProgressUpdate = j.ts.Now()
j.checkpointDuration = time.Duration(j.metrics.CheckpointHistNanos.Snapshot().Mean())
j.checkpointDuration = time.Duration(j.metrics.CheckpointHistNanos.Mean())
j.progressUpdatesSkipped = false
}

44 changes: 19 additions & 25 deletions pkg/ccl/changefeedccl/metrics.go
@@ -417,20 +417,15 @@ func newAggregateMetrics(histogramWindow time.Duration) *AggMetrics {
a := &AggMetrics{
ErrorRetries: b.Counter(metaChangefeedErrorRetries),
EmittedMessages: b.Counter(metaChangefeedEmittedMessages),
MessageSize: b.Histogram(metaMessageSize,
histogramWindow, 10<<20 /* 10MB max message size */, 1),
EmittedBytes: b.Counter(metaChangefeedEmittedBytes),
FlushedBytes: b.Counter(metaChangefeedFlushedBytes),
Flushes: b.Counter(metaChangefeedFlushes),

BatchHistNanos: b.Histogram(metaChangefeedBatchHistNanos,
histogramWindow, changefeedBatchHistMaxLatency.Nanoseconds(), 1),
FlushHistNanos: b.Histogram(metaChangefeedFlushHistNanos,
histogramWindow, changefeedFlushHistMaxLatency.Nanoseconds(), 2),
CommitLatency: b.Histogram(metaCommitLatency,
histogramWindow, commitLatencyMaxValue.Nanoseconds(), 1),
AdmitLatency: b.Histogram(metaAdmitLatency, histogramWindow,
admitLatencyMaxValue.Nanoseconds(), 1),
MessageSize: b.Histogram(metaMessageSize, histogramWindow, metric.DataSizeBuckets),
EmittedBytes: b.Counter(metaChangefeedEmittedBytes),
FlushedBytes: b.Counter(metaChangefeedFlushedBytes),
Flushes: b.Counter(metaChangefeedFlushes),

BatchHistNanos: b.Histogram(metaChangefeedBatchHistNanos, histogramWindow, metric.BatchProcessLatencyBuckets),
FlushHistNanos: b.Histogram(metaChangefeedFlushHistNanos, histogramWindow, metric.BatchProcessLatencyBuckets),
CommitLatency: b.Histogram(metaCommitLatency, histogramWindow, metric.BatchProcessLatencyBuckets),
AdmitLatency: b.Histogram(metaAdmitLatency, histogramWindow, metric.BatchProcessLatencyBuckets),
BackfillCount: b.Gauge(metaChangefeedBackfillCount),
BackfillPendingRanges: b.Gauge(metaChangefeedBackfillPendingRanges),
RunningCount: b.Gauge(metaChangefeedRunning),
@@ -529,17 +524,16 @@ func (m *Metrics) getSLIMetrics(scope string) (*sliMetrics, error) {
// MakeMetrics makes the metrics for changefeed monitoring.
func MakeMetrics(histogramWindow time.Duration) metric.Struct {
m := &Metrics{
AggMetrics: newAggregateMetrics(histogramWindow),
KVFeedMetrics: kvevent.MakeMetrics(histogramWindow),
SchemaFeedMetrics: schemafeed.MakeMetrics(histogramWindow),
ResolvedMessages: metric.NewCounter(metaChangefeedForwardedResolvedMessages),
Failures: metric.NewCounter(metaChangefeedFailures),
QueueTimeNanos: metric.NewCounter(metaEventQueueTime),
CheckpointHistNanos: metric.NewHistogram(metaChangefeedCheckpointHistNanos, histogramWindow,
changefeedCheckpointHistMaxLatency.Nanoseconds(), 2),
FrontierUpdates: metric.NewCounter(metaChangefeedFrontierUpdates),
ThrottleMetrics: cdcutils.MakeMetrics(histogramWindow),
ReplanCount: metric.NewCounter(metaChangefeedReplanCount),
AggMetrics: newAggregateMetrics(histogramWindow),
KVFeedMetrics: kvevent.MakeMetrics(histogramWindow),
SchemaFeedMetrics: schemafeed.MakeMetrics(histogramWindow),
ResolvedMessages: metric.NewCounter(metaChangefeedForwardedResolvedMessages),
Failures: metric.NewCounter(metaChangefeedFailures),
QueueTimeNanos: metric.NewCounter(metaEventQueueTime),
CheckpointHistNanos: metric.NewHistogram(metaChangefeedCheckpointHistNanos, histogramWindow, metric.IOLatencyBuckets),
FrontierUpdates: metric.NewCounter(metaChangefeedFrontierUpdates),
ThrottleMetrics: cdcutils.MakeMetrics(histogramWindow),
ReplanCount: metric.NewCounter(metaChangefeedReplanCount),
}

m.mu.resolved = make(map[int]hlc.Timestamp)
10 changes: 7 additions & 3 deletions pkg/ccl/sqlproxyccl/connector_test.go
@@ -375,7 +375,9 @@ func TestConnector_dialTenantCluster(t *testing.T) {
defer cancel()

c := &connector{
DialTenantLatency: metric.NewLatency(metaDialTenantLatency, time.Millisecond),
DialTenantLatency: metric.NewHistogram(
metaDialTenantLatency, time.Millisecond, metric.IOLatencyBuckets,
),
DialTenantRetries: metric.NewCounter(metaDialTenantRetries),
}
c.testingKnobs.lookupAddr = func(ctx context.Context) (string, error) {
@@ -403,8 +405,10 @@

var reportFailureFnCount int
c := &connector{
TenantID: roachpb.MakeTenantID(42),
DialTenantLatency: metric.NewLatency(metaDialTenantLatency, time.Millisecond),
TenantID: roachpb.MakeTenantID(42),
DialTenantLatency: metric.NewHistogram(
metaDialTenantLatency, time.Millisecond, metric.IOLatencyBuckets,
),
DialTenantRetries: metric.NewCounter(metaDialTenantRetries),
}
c.DirectoryCache = &testTenantDirectoryCache{
12 changes: 7 additions & 5 deletions pkg/ccl/sqlproxyccl/metrics.go
@@ -224,32 +224,34 @@ func makeProxyMetrics() metrics {
RoutingErrCount: metric.NewCounter(metaRoutingErrCount),
RefusedConnCount: metric.NewCounter(metaRefusedConnCount),
SuccessfulConnCount: metric.NewCounter(metaSuccessfulConnCount),
ConnectionLatency: metric.NewLatency(
ConnectionLatency: metric.NewHistogram(
metaConnMigrationAttemptedCount,
base.DefaultHistogramWindowInterval(),
metric.IOLatencyBuckets,
),
AuthFailedCount: metric.NewCounter(metaAuthFailedCount),
ExpiredClientConnCount: metric.NewCounter(metaExpiredClientConnCount),
// Connector metrics.
DialTenantLatency: metric.NewLatency(
DialTenantLatency: metric.NewHistogram(
metaDialTenantLatency,
base.DefaultHistogramWindowInterval(),
metric.NetworkLatencyBuckets,
),
DialTenantRetries: metric.NewCounter(metaDialTenantRetries),
// Connection migration metrics.
ConnMigrationSuccessCount: metric.NewCounter(metaConnMigrationSuccessCount),
ConnMigrationErrorFatalCount: metric.NewCounter(metaConnMigrationErrorFatalCount),
ConnMigrationErrorRecoverableCount: metric.NewCounter(metaConnMigrationErrorRecoverableCount),
ConnMigrationAttemptedCount: metric.NewCounter(metaConnMigrationAttemptedCount),
ConnMigrationAttemptedLatency: metric.NewLatency(
ConnMigrationAttemptedLatency: metric.NewHistogram(
metaConnMigrationAttemptedLatency,
base.DefaultHistogramWindowInterval(),
metric.NetworkLatencyBuckets,
),
ConnMigrationTransferResponseMessageSize: metric.NewHistogram(
metaConnMigrationTransferResponseMessageSize,
base.DefaultHistogramWindowInterval(),
maxExpectedTransferResponseMessageSize,
1,
metric.DataSizeBuckets,
),
QueryCancelReceivedPGWire: metric.NewCounter(metaQueryCancelReceivedPGWire),
QueryCancelReceivedHTTP: metric.NewCounter(metaQueryCancelReceivedHTTP),
6 changes: 3 additions & 3 deletions pkg/ccl/streamingccl/streamingest/metrics.go
@@ -135,11 +135,11 @@ func MakeMetrics(histogramWindow time.Duration) metric.Struct {
ResolvedEvents: metric.NewCounter(metaStreamingResolvedEventsIngested),
JobProgressUpdates: metric.NewCounter(metaJobProgressUpdates),
FlushHistNanos: metric.NewHistogram(metaStreamingFlushHistNanos,
histogramWindow, streamingFlushHistMaxLatency.Nanoseconds(), 1),
histogramWindow, metric.BatchProcessLatencyBuckets),
CommitLatency: metric.NewHistogram(metaStreamingCommitLatency,
histogramWindow, streamingCommitLatencyMaxValue.Nanoseconds(), 1),
histogramWindow, metric.BatchProcessLatencyBuckets),
AdmitLatency: metric.NewHistogram(metaStreamingAdmitLatency,
histogramWindow, streamingAdmitLatencyMaxValue.Nanoseconds(), 1),
histogramWindow, metric.BatchProcessLatencyBuckets),
RunningCount: metric.NewGauge(metaStreamsRunning),
EarliestDataCheckpointSpan: metric.NewGauge(metaEarliestDataCheckpointSpan),
LatestDataCheckpointSpan: metric.NewGauge(metaLatestDataCheckpointSpan),
2 changes: 1 addition & 1 deletion pkg/kv/bulk/bulk_metrics.go
@@ -50,7 +50,7 @@ const log10int64times1000 = 19 * 1000
// MakeBulkMetrics instantiates the metrics holder for bulk operation monitoring.
func MakeBulkMetrics(histogramWindow time.Duration) Metrics {
return Metrics{
MaxBytesHist: metric.NewHistogram(metaMemMaxBytes, histogramWindow, log10int64times1000, 3),
MaxBytesHist: metric.NewHistogram(metaMemMaxBytes, histogramWindow, metric.MemoryUsageBuckets),
CurBytesCount: metric.NewGauge(metaMemCurBytes),
}
}
4 changes: 2 additions & 2 deletions pkg/kv/kvclient/kvcoord/txn_metrics.go
@@ -274,11 +274,11 @@ func MakeTxnMetrics(histogramWindow time.Duration) TxnMetrics {
RefreshFailWithCondensedSpans: metric.NewCounter(metaRefreshFailWithCondensedSpans),
RefreshMemoryLimitExceeded: metric.NewCounter(metaRefreshMemoryLimitExceeded),
RefreshAutoRetries: metric.NewCounter(metaRefreshAutoRetries),
Durations: metric.NewLatency(metaDurationsHistograms, histogramWindow),
Durations: metric.NewHistogram(metaDurationsHistograms, histogramWindow, metric.IOLatencyBuckets),
TxnsWithCondensedIntents: metric.NewCounter(metaTxnsWithCondensedIntentSpans),
TxnsWithCondensedIntentsGauge: metric.NewGauge(metaTxnsWithCondensedIntentSpansGauge),
TxnsRejectedByLockSpanBudget: metric.NewCounter(metaTxnsRejectedByLockSpanBudget),
Restarts: metric.NewHistogram(metaRestartsHistogram, histogramWindow, 100, 3),
Restarts: metric.NewHistogram(metaRestartsHistogram, histogramWindow, metric.CountBuckets),
RestartsWriteTooOld: telemetry.NewCounterWithMetric(metaRestartsWriteTooOld),
RestartsWriteTooOldMulti: telemetry.NewCounterWithMetric(metaRestartsWriteTooOldMulti),
RestartsSerializable: telemetry.NewCounterWithMetric(metaRestartsSerializable),
16 changes: 10 additions & 6 deletions pkg/kv/kvprober/kvprober.go
@@ -214,14 +214,18 @@ func NewProber(opts Opts) *Prober {
writePlanner: newMeta2Planner(opts.DB, opts.Settings, func() time.Duration { return writeInterval.Get(&opts.Settings.SV) }),

metrics: Metrics{
ReadProbeAttempts: metric.NewCounter(metaReadProbeAttempts),
ReadProbeFailures: metric.NewCounter(metaReadProbeFailures),
ReadProbeLatency: metric.NewLatency(metaReadProbeLatency, opts.HistogramWindowInterval),
ReadProbeAttempts: metric.NewCounter(metaReadProbeAttempts),
ReadProbeFailures: metric.NewCounter(metaReadProbeFailures),
ReadProbeLatency: metric.NewHistogram(
metaReadProbeLatency, opts.HistogramWindowInterval, metric.IOLatencyBuckets,
),
WriteProbeAttempts: metric.NewCounter(metaWriteProbeAttempts),
WriteProbeFailures: metric.NewCounter(metaWriteProbeFailures),
WriteProbeLatency: metric.NewLatency(metaWriteProbeLatency, opts.HistogramWindowInterval),
ProbePlanAttempts: metric.NewCounter(metaProbePlanAttempts),
ProbePlanFailures: metric.NewCounter(metaProbePlanFailures),
WriteProbeLatency: metric.NewHistogram(
metaWriteProbeLatency, opts.HistogramWindowInterval, metric.IOLatencyBuckets,
),
ProbePlanAttempts: metric.NewCounter(metaProbePlanAttempts),
ProbePlanFailures: metric.NewCounter(metaProbePlanFailures),
},
tracer: opts.Tracer,
}
4 changes: 3 additions & 1 deletion pkg/kv/kvserver/liveness/liveness.go
@@ -309,7 +309,9 @@ func NewNodeLiveness(opts NodeLivenessOptions) *NodeLiveness {
HeartbeatSuccesses: metric.NewCounter(metaHeartbeatSuccesses),
HeartbeatFailures: telemetry.NewCounterWithMetric(metaHeartbeatFailures),
EpochIncrements: telemetry.NewCounterWithMetric(metaEpochIncrements),
HeartbeatLatency: metric.NewLatency(metaHeartbeatLatency, opts.HistogramWindowInterval),
HeartbeatLatency: metric.NewHistogram(
metaHeartbeatLatency, opts.HistogramWindowInterval, metric.IOLatencyBuckets,
),
}
nl.mu.nodes = make(map[roachpb.NodeID]Record)
nl.heartbeatToken <- struct{}{}
39 changes: 26 additions & 13 deletions pkg/kv/kvserver/metrics.go
@@ -2245,18 +2245,27 @@ func newStoreMetrics(histogramWindow time.Duration) *StoreMetrics {
// Raft processing metrics.
RaftTicks: metric.NewCounter(metaRaftTicks),
RaftQuotaPoolPercentUsed: metric.NewHistogram(
// NB: this results in 64 buckets (i.e. 64 timeseries in prometheus).
metaRaftQuotaPoolPercentUsed, histogramWindow, 100 /* maxVal */, 1, /* sigFigs */
metaRaftQuotaPoolPercentUsed, histogramWindow, metric.PercentBuckets,
),
RaftWorkingDurationNanos: metric.NewCounter(metaRaftWorkingDurationNanos),
RaftTickingDurationNanos: metric.NewCounter(metaRaftTickingDurationNanos),
RaftCommandsApplied: metric.NewCounter(metaRaftCommandsApplied),
RaftLogCommitLatency: metric.NewLatency(metaRaftLogCommitLatency, histogramWindow),
RaftCommandCommitLatency: metric.NewLatency(metaRaftCommandCommitLatency, histogramWindow),
RaftHandleReadyLatency: metric.NewLatency(metaRaftHandleReadyLatency, histogramWindow),
RaftApplyCommittedLatency: metric.NewLatency(metaRaftApplyCommittedLatency, histogramWindow),
RaftSchedulerLatency: metric.NewLatency(metaRaftSchedulerLatency, histogramWindow),
RaftTimeoutCampaign: metric.NewCounter(metaRaftTimeoutCampaign),
RaftWorkingDurationNanos: metric.NewCounter(metaRaftWorkingDurationNanos),
RaftTickingDurationNanos: metric.NewCounter(metaRaftTickingDurationNanos),
RaftCommandsApplied: metric.NewCounter(metaRaftCommandsApplied),
RaftLogCommitLatency: metric.NewHistogram(
metaRaftLogCommitLatency, histogramWindow, metric.IOLatencyBuckets,
),
RaftCommandCommitLatency: metric.NewHistogram(
metaRaftCommandCommitLatency, histogramWindow, metric.IOLatencyBuckets,
),
RaftHandleReadyLatency: metric.NewHistogram(
metaRaftHandleReadyLatency, histogramWindow, metric.IOLatencyBuckets,
),
RaftApplyCommittedLatency: metric.NewHistogram(
metaRaftApplyCommittedLatency, histogramWindow, metric.IOLatencyBuckets,
),
RaftSchedulerLatency: metric.NewHistogram(
metaRaftSchedulerLatency, histogramWindow, metric.IOLatencyBuckets,
),
RaftTimeoutCampaign: metric.NewCounter(metaRaftTimeoutCampaign),

// Raft message metrics.
RaftRcvdMessages: [...]*metric.Counter{
@@ -2394,8 +2403,12 @@ func newStoreMetrics(histogramWindow time.Duration) *StoreMetrics {
ReplicaCircuitBreakerCumTripped: metric.NewCounter(metaReplicaCircuitBreakerCumTripped),

// Replica batch evaluation.
ReplicaReadBatchEvaluationLatency: metric.NewLatency(metaReplicaReadBatchEvaluationLatency, histogramWindow),
ReplicaWriteBatchEvaluationLatency: metric.NewLatency(metaReplicaWriteBatchEvaluationLatency, histogramWindow),
ReplicaReadBatchEvaluationLatency: metric.NewHistogram(
metaReplicaReadBatchEvaluationLatency, histogramWindow, metric.IOLatencyBuckets,
),
ReplicaWriteBatchEvaluationLatency: metric.NewHistogram(
metaReplicaWriteBatchEvaluationLatency, histogramWindow, metric.IOLatencyBuckets,
),
}

{
6 changes: 2 additions & 4 deletions pkg/kv/kvserver/txnwait/metrics.go
@@ -74,8 +74,7 @@ func NewMetrics(histogramWindowInterval time.Duration) *Metrics {
Unit: metric.Unit_NANOSECONDS,
},
histogramWindowInterval,
time.Hour.Nanoseconds(),
1,
metric.LongRunningProcessLatencyBuckets,
),

QueryWaitTime: metric.NewHistogram(
@@ -86,8 +85,7 @@ func NewMetrics(histogramWindowInterval time.Duration) *Metrics {
Unit: metric.Unit_NANOSECONDS,
},
histogramWindowInterval,
time.Hour.Nanoseconds(),
1,
metric.LongRunningProcessLatencyBuckets,
),

DeadlocksTotal: metric.NewCounter(
4 changes: 3 additions & 1 deletion pkg/rpc/clock_offset.go
@@ -122,7 +122,9 @@ func newRemoteClockMonitor(
r.metrics = RemoteClockMetrics{
ClockOffsetMeanNanos: metric.NewGauge(metaClockOffsetMeanNanos),
ClockOffsetStdDevNanos: metric.NewGauge(metaClockOffsetStdDevNanos),
LatencyHistogramNanos: metric.NewLatency(metaLatencyHistogramNanos, histogramWindowInterval),
LatencyHistogramNanos: metric.NewHistogram(
metaLatencyHistogramNanos, histogramWindowInterval, metric.IOLatencyBuckets,
),
}
return &r
}
4 changes: 3 additions & 1 deletion pkg/server/node.go
@@ -173,7 +173,9 @@ type nodeMetrics struct {

func makeNodeMetrics(reg *metric.Registry, histogramWindow time.Duration) nodeMetrics {
nm := nodeMetrics{
Latency: metric.NewLatency(metaExecLatency, histogramWindow),
Latency: metric.NewHistogram(
metaExecLatency, histogramWindow, metric.IOLatencyBuckets,
),
Success: metric.NewCounter(metaExecSuccess),
Err: metric.NewCounter(metaExecError),
DiskStalls: metric.NewCounter(metaDiskStalls),
1 change: 0 additions & 1 deletion pkg/server/status/BUILD.bazel
@@ -147,7 +147,6 @@ go_test(
"//pkg/util/system",
"//pkg/util/timeutil",
"@com_github_kr_pretty//:pretty",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_shirou_gopsutil_v3//net",
],
)
20 changes: 0 additions & 20 deletions pkg/server/status/recorder.go
@@ -529,26 +529,6 @@ type registryRecorder struct
func extractValue(name string, mtr interface{}, fn func(string, float64)) error {
switch mtr := mtr.(type) {
case *metric.Histogram:
// TODO(mrtracy): Where should this comment go for better
// visibility?
//
// Proper support of Histograms for time series is difficult and
// likely not worth the trouble. Instead, we aggregate a windowed
// histogram at fixed quantiles. If the scraping window and the
// histogram's eviction duration are similar, this should give
// good results; if the two durations are very different, we either
// report stale results or report only the more recent data.
//
// Additionally, we can only aggregate max/min of the quantiles;
// roll-ups don't know that and so they will return mathematically
// nonsensical values, but that seems acceptable for the time
// being.
curr, _ := mtr.Windowed()
for _, pt := range recordHistogramQuantiles {
fn(name+pt.suffix, float64(curr.ValueAtQuantile(pt.quantile)))
}
fn(name+"-count", float64(curr.TotalCount()))
case *metric.HistogramV2:
n := float64(mtr.TotalCountWindowed())
fn(name+"-count", n)
avg := mtr.TotalSumWindowed() / n
25 changes: 2 additions & 23 deletions pkg/server/status/recorder_test.go
@@ -33,7 +33,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/system"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/kr/pretty"
"github.com/prometheus/client_golang/prometheus"
)

// byTimeAndName is a slice of tspb.TimeSeriesData.
@@ -202,8 +201,7 @@ func TestMetricsRecorder(t *testing.T) {
{"testGauge", "gauge", 20},
{"testGaugeFloat64", "floatgauge", 20},
{"testCounter", "counter", 5},
{"testHistogram", "histogram", 10},
{"testHistogramV2", "histogramV2", 9},
{"testHistogram", "histogram", 9},
{"testLatency", "latency", 10},
{"testAggGauge", "agggauge", 4},
{"testAggCounter", "aggcounter", 7},
@@ -289,33 +287,14 @@ func TestMetricsRecorder(t *testing.T) {
c.Inc((data.val))
addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
case "histogram":
h := metric.NewHistogram(metric.Metadata{Name: reg.prefix + data.name}, time.Second, 1000, 2)
reg.reg.AddMetric(h)
h.RecordValue(data.val)
for _, q := range recordHistogramQuantiles {
addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val, reg.isNode)
}
addExpected(reg.prefix, data.name+"-count", reg.source, 100, 1, reg.isNode)
case "histogramV2":
h := metric.NewHistogramV2(metric.Metadata{Name: reg.prefix + data.name}, time.Second,
prometheus.HistogramOpts{Buckets: []float64{1.0, 10.0, 100.0, 1000.0}})
h := metric.NewHistogram(metric.Metadata{Name: reg.prefix + data.name}, time.Second, []float64{1.0, 10.0, 100.0, 1000.0})
reg.reg.AddMetric(h)
h.RecordValue(data.val)
for _, q := range recordHistogramQuantiles {
addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, 10, reg.isNode)
}
addExpected(reg.prefix, data.name+"-count", reg.source, 100, 1, reg.isNode)
addExpected(reg.prefix, data.name+"-avg", reg.source, 100, 9, reg.isNode)
case "latency":
l := metric.NewLatency(metric.Metadata{Name: reg.prefix + data.name}, time.Hour)
reg.reg.AddMetric(l)
l.RecordValue(data.val)
// Latency is simply three histograms (at different resolution
// time scales).
for _, q := range recordHistogramQuantiles {
addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val, reg.isNode)
}
addExpected(reg.prefix, data.name+"-count", reg.source, 100, 1, reg.isNode)
default:
t.Fatalf("unexpected: %+v", data)
}