Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Om86 #100

Closed
wants to merge 3 commits into from
Closed

Om86 #100

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
83 changes: 81 additions & 2 deletions watcher_latency.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
package main

import (
"strings"

"github.com/prometheus/client_golang/prometheus"

log "github.com/sirupsen/logrus"
Expand All @@ -9,6 +11,8 @@
// LatencyWatcher collects Aerospike latency histogram metrics ("latencies:" /
// legacy "latency:" info commands) and exposes them to Prometheus.
// It carries no state of its own; the zero value is ready to use.
type LatencyWatcher struct {
}

// LatencyBenchmarks records which latency histograms are enabled, keyed as
// "<namespace>-<stat>" (populated by the namespace watcher) or
// "service-<stat>" (populated by the node-stats watcher), with value 1 for
// enabled and 0 for disabled. Consumed by LatencyWatcher.getLatenciesCommands.
// NOTE(review): plain map shared across watchers — confirm the watchers never
// run concurrently; Go maps are not safe for concurrent read/write and would
// need a mutex otherwise.
var LatencyBenchmarks = make(map[string]float64)

// describe is a no-op for the latency watcher — no metric descriptors are
// pre-registered here (presumably they are emitted dynamically on refresh;
// confirm against the watcher interface's other implementations).
func (lw *LatencyWatcher) describe(ch chan<- *prometheus.Desc) {}

func (lw *LatencyWatcher) passOneKeys() []string {
Expand All @@ -33,7 +37,7 @@
}

if ok {
return []string{"latencies:"}
return lw.getLatenciesCommands(rawMetrics)
}

return []string{"latency:"}
Expand All @@ -56,10 +60,22 @@
}
}

// loop all the latency infokeys
for ik := range infoKeys {
parseSingleLatenciesKey(infoKeys[ik], rawMetrics, allowedLatenciesList, blockedLatenciessList, ch)

Check failure on line 65 in watcher_latency.go

View workflow job for this annotation

GitHub Actions / lint

Error return value is not checked (errcheck)
}

return nil
}

func parseSingleLatenciesKey(singleLatencyKey string, rawMetrics map[string]string,
allowedLatenciesList map[string]struct{},
blockedLatenciessList map[string]struct{}, ch chan<- prometheus.Metric) error {

var latencyStats map[string]StatsMap

if rawMetrics["latencies:"] != "" {
latencyStats = parseLatencyInfo(rawMetrics["latencies:"], int(config.Aerospike.LatencyBucketsCount))
latencyStats = parseLatencyInfo(rawMetrics[singleLatencyKey], int(config.Aerospike.LatencyBucketsCount))
} else {
latencyStats = parseLatencyInfoLegacy(rawMetrics["latency:"], int(config.Aerospike.LatencyBucketsCount))
}
Expand Down Expand Up @@ -99,3 +115,66 @@

return nil
}

// Utility methods

// canConsiderLatencyCommand reports whether a stat name refers to a latency
// histogram toggle, i.e. any "enable-benchmarks-*" or "enable-hist-*" setting
// (hist-proxy and hist-info are both service-level).
func canConsiderLatencyCommand(stat string) bool {
	for _, marker := range []string{"enable-benchmarks-", "enable-hist-"} {
		if strings.Contains(stat, marker) {
			return true
		}
	}
	return false
}

// getLatenciesCommands builds the list of "latencies:..." info commands to
// issue, starting with the plain "latencies:" command and adding one
// "latencies:hist=..." command per enabled benchmark/histogram recorded in
// LatencyBenchmarks.
//
// LatencyBenchmarks keys have the form "<namespace>-<stat>" (or
// "service-<stat>"), e.g. "test-enable-benchmarks-read" or "test-re-repl";
// only entries whose value is 1 (enabled) produce a command.
//
// BUGFIX: the namespace part is now recovered by stripping the known stat
// suffix (strings.TrimSuffix / strings.LastIndex) instead of
// strings.Split(key, "-")[0], which silently truncated namespace names that
// themselves contain hyphens (Aerospike allows '-' in namespace names).
//
// Note: map iteration order is random, so the order of the generated
// commands (after the leading "latencies:") is unspecified.
func (lw *LatencyWatcher) getLatenciesCommands(rawMetrics map[string]string) []string {
	commands := []string{"latencies:"}

	for key, enabled := range LatencyBenchmarks {
		// only if enabled, fetch the metrics
		if enabled != 1 {
			continue
		}

		// Examples of generated commands:
		//   enable-hist-proxy        -> latencies:hist={test}-proxy
		//   enable-benchmarks-fabric -> latencies:hist=benchmarks-fabric
		//   re-repl                  -> latencies:hist={test}-re-repl
		switch {
		case strings.HasSuffix(key, "-re-repl"):
			// re-repl is auto-enabled but absent from the "latencies:" list,
			// hence added explicitly; key format is "<namespace>-re-repl".
			ns := strings.TrimSuffix(key, "-re-repl")
			commands = append(commands, "latencies:hist={"+ns+"}-re-repl")
		case strings.HasSuffix(key, "-enable-hist-proxy"):
			ns := strings.TrimSuffix(key, "-enable-hist-proxy")
			commands = append(commands, "latencies:hist={"+ns+"}-proxy")
		case strings.Contains(key, "enable-benchmarks-fabric"):
			// fabric benchmarks are not namespace-scoped
			commands = append(commands, "latencies:hist=benchmarks-fabric")
		case strings.Contains(key, "enable-hist-info"):
			// info histogram is not namespace-scoped
			commands = append(commands, "latencies:hist=info")
		case strings.Contains(key, "-benchmarks-"):
			// remaining enabled benchmark latencies like enable-benchmarks-read,
			// enable-benchmarks-write, enable-benchmarks-udf, enable-benchmarks-udf-sub,
			// enable-benchmarks-ops-sub, enable-benchmarks-batch-sub
			//
			// key format := <namespace>-enable-benchmarks-<name>
			idx := strings.LastIndex(key, "-enable-benchmarks-")
			if idx < 0 {
				// defensive: contains "-benchmarks-" but not the expected
				// "-enable-benchmarks-" form; nothing sensible to build
				continue
			}
			ns := key[:idx]
			hist := key[idx+len("-enable"):] // "-benchmarks-<name>"
			commands = append(commands, "latencies:hist={"+ns+"}"+hist)
		}
	}

	log.Tracef("latency-passtwokeys:%s", commands)

	return commands
}
10 changes: 10 additions & 0 deletions watcher_namespaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,16 @@ func (nw *NamespaceWatcher) refreshNamespaceStats(singleInfoKey string, infoKeys
// push to prom-channel
pushToPrometheus(asMetric, pv, labels, labelValues, ch)
}

// below code section is to ensure ns+latencies combination is handled during LatencyWatcher
//
// check and if latency benchmarks stat - is it enabled (bool true==1 and false==0 after conversion)
if canConsiderLatencyCommand(stat) {
LatencyBenchmarks[nsName+"-"+stat] = pv
}
// append default re-repl, as this auto-enabled, but not coming as part of latencies, we need this as namespace is available only here
LatencyBenchmarks[nsName+"-re-repl"] = 1

}

}
Expand Down
5 changes: 5 additions & 0 deletions watcher_node_stats.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,5 +75,10 @@ func (sw *StatsWatcher) handleRefresh(o *Observer, nodeRawMetrics string, cluste

pushToPrometheus(asMetric, pv, labels, labelsValues, ch)

// check and if latency benchmarks stat, is it enabled (bool true==1 and false==0 after conversion)
if canConsiderLatencyCommand(stat) {
LatencyBenchmarks["service-"+stat] = pv
}

}
}
Loading