)
-// %+v equivalent to %+s:%d
-func (f Frame) Format(s fmt.State, verb rune) {
- switch verb {
- case 's':
- switch {
- case s.Flag('+'):
- io.WriteString(s, f.name())
- io.WriteString(s, "\n\t")
- io.WriteString(s, f.file())
- default:
- io.WriteString(s, path.Base(f.file()))
- }
- case 'd':
- io.WriteString(s, strconv.Itoa(f.line()))
- case 'n':
- io.WriteString(s, funcname(f.name()))
- case 'v':
- f.Format(s, 's')
- io.WriteString(s, ":")
- f.Format(s, 'd')
- }
-}
-
-// MarshalText formats a stacktrace Frame as a text string. The output is the
-// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
-func (f Frame) MarshalText() ([]byte, error) {
- name := f.name()
- if name == "unknown" {
- return []byte(name), nil
- }
- return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
-}
-
-// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
-type StackTrace []Frame
-
-// Format formats the stack of Frames according to the fmt.Formatter interface.
-//
-// %s lists source files for each Frame in the stack
-// %v lists the source file and line number for each Frame in the stack
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+v Prints filename, function, and line number for each Frame in the stack.
-func (st StackTrace) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case s.Flag('+'):
- for _, f := range st {
- io.WriteString(s, "\n")
- f.Format(s, verb)
- }
- case s.Flag('#'):
- fmt.Fprintf(s, "%#v", []Frame(st))
- default:
- st.formatSlice(s, verb)
- }
- case 's':
- st.formatSlice(s, verb)
- }
-}
-
-// formatSlice will format this StackTrace into the given buffer as a slice of
-// Frame, only valid when called with '%s' or '%v'.
-func (st StackTrace) formatSlice(s fmt.State, verb rune) {
- io.WriteString(s, "[")
- for i, f := range st {
- if i > 0 {
- io.WriteString(s, " ")
- }
- f.Format(s, verb)
- }
- io.WriteString(s, "]")
-}
-
-// stack represents a stack of program counters.
-type stack []uintptr
-
-func (s *stack) Format(st fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case st.Flag('+'):
- for _, pc := range *s {
- f := Frame(pc)
- fmt.Fprintf(st, "\n%+v", f)
- }
- }
- }
-}
-
-func (s *stack) StackTrace() StackTrace {
- f := make([]Frame, len(*s))
- for i := 0; i < len(f); i++ {
- f[i] = Frame((*s)[i])
- }
- return f
-}
-
-func callers() *stack {
- const depth = 32
- var pcs [depth]uintptr
- n := runtime.Callers(3, pcs[:])
- var st stack = pcs[0:n]
- return &st
-}
-
-// funcname removes the path prefix component of a function's name reported by runtime.Func.Name().
-func funcname(name string) string {
- i := strings.LastIndex(name, "/")
- name = name[i+1:]
- i = strings.Index(name, ".")
- return name[i+1:]
-}
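
For context on what is being removed: the `Frame` and `StackTrace` formatters above drive the verbs of the public pkg/errors API. A minimal sketch of how they render, assuming the package is still imported elsewhere:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	err := errors.New("boom") // records the call stack via callers()

	fmt.Printf("%s\n", err) // boom
	// %+v walks the recorded stack: each Frame prints as
	// "function\n\tfile:line", per Frame.Format above.
	fmt.Printf("%+v\n", err)
}
```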
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
deleted file mode 100644
index dd878a30ee..0000000000
--- a/vendor/github.com/prometheus/client_golang/NOTICE
+++ /dev/null
@@ -1,23 +0,0 @@
-Prometheus instrumentation library for Go applications
-Copyright 2012-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
-
-
-The following components are included in this product:
-
-perks - a fork of https://github.com/bmizerany/perks
-https://github.com/beorn7/perks
-Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
-See https://github.com/beorn7/perks/blob/master/README.md for license details.
-
-Go support for Protocol Buffers - Google's data interchange format
-http://github.com/golang/protobuf/
-Copyright 2010 The Go Authors
-See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
deleted file mode 100644
index 3460f0346d..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
deleted file mode 100644
index c67ff1b7fa..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ /dev/null
@@ -1 +0,0 @@
-See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
deleted file mode 100644
index 450189f35e..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "runtime/debug"
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
- path, version, sum := "unknown", "unknown", "unknown"
- if bi, ok := debug.ReadBuildInfo(); ok {
- path = bi.Main.Path
- version = bi.Main.Version
- sum = bi.Main.Sum
- }
- c := &selfCollector{MustNewConstMetric(
- NewDesc(
- "go_build_info",
- "Build information about the main Go module.",
- nil, Labels{"path": path, "version": version, "checksum": sum},
- ),
- GaugeValue, 1)}
- c.init(c.self)
- return c
-}
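
Since the deleted `NewBuildInfoCollector` above is the deprecated alias, its replacement lives in the `collectors` sub-package. A minimal sketch of the migration:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	// Replaces the deprecated prometheus.NewBuildInfoCollector shown above;
	// it exports go_build_info with path, version, and checksum labels.
	reg.MustRegister(collectors.NewBuildInfoCollector())
}
```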
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
deleted file mode 100644
index cf05079fb8..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Collector is the interface implemented by anything that can be used by
-// Prometheus to collect metrics. A Collector has to be registered for
-// collection. See Registerer.Register.
-//
-// The stock metrics provided by this package (Gauge, Counter, Summary,
-// Histogram, Untyped) are also Collectors (which only ever collect one metric,
-// namely itself). An implementer of Collector may, however, collect multiple
-// metrics in a coordinated fashion and/or create metrics on the fly. Examples
-// for collectors already implemented in this library are the metric vectors
-// (i.e. collection of multiple instances of the same Metric but with different
-// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
-type Collector interface {
- // Describe sends the super-set of all possible descriptors of metrics
- // collected by this Collector to the provided channel and returns once
- // the last descriptor has been sent. The sent descriptors fulfill the
- // consistency and uniqueness requirements described in the Desc
- // documentation.
- //
- // It is valid if one and the same Collector sends duplicate
- // descriptors. Those duplicates are simply ignored. However, two
- // different Collectors must not send duplicate descriptors.
- //
- // Sending no descriptor at all marks the Collector as “unchecked”,
- // i.e. no checks will be performed at registration time, and the
- // Collector may yield any Metric it sees fit in its Collect method.
- //
- // This method idempotently sends the same descriptors throughout the
- // lifetime of the Collector. It may be called concurrently and
- // therefore must be implemented in a concurrency safe way.
- //
- // If a Collector encounters an error while executing this method, it
- // must send an invalid descriptor (created with NewInvalidDesc) to
- // signal the error to the registry.
- Describe(chan<- *Desc)
- // Collect is called by the Prometheus registry when collecting
- // metrics. The implementation sends each collected metric via the
- // provided channel and returns once the last metric has been sent. The
- // descriptor of each sent metric is one of those returned by Describe
- // (unless the Collector is unchecked, see above). Returned metrics that
- // share the same descriptor must differ in their variable label
- // values.
- //
- // This method may be called concurrently and must therefore be
- // implemented in a concurrency safe way. Blocking occurs at the expense
- // of total performance of rendering all registered metrics. Ideally,
- // Collector implementations support concurrent readers.
- Collect(chan<- Metric)
-}
-
-// DescribeByCollect is a helper to implement the Describe method of a custom
-// Collector. It collects the metrics from the provided Collector and sends
-// their descriptors to the provided channel.
-//
-// If a Collector collects the same metrics throughout its lifetime, its
-// Describe method can simply be implemented as:
-//
-// func (c customCollector) Describe(ch chan<- *Desc) {
-// DescribeByCollect(c, ch)
-// }
-//
-// However, this will not work if the metrics collected change dynamically over
-// the lifetime of the Collector in a way that their combined set of descriptors
-// changes as well. The shortcut implementation will then violate the contract
-// of the Describe method. If a Collector sometimes collects no metrics at all
-// (for example vectors like CounterVec, GaugeVec, etc., which only collect
-// metrics after a metric with a fully specified label set has been accessed),
-// it might even get registered as an unchecked Collector (cf. the Register
-// method of the Registerer interface). Hence, only use this shortcut
-// implementation of Describe if you are certain to fulfill the contract.
-//
-// The Collector example demonstrates a use of DescribeByCollect.
-func DescribeByCollect(c Collector, descs chan<- *Desc) {
- metrics := make(chan Metric)
- go func() {
- c.Collect(metrics)
- close(metrics)
- }()
- for m := range metrics {
- descs <- m.Desc()
- }
-}
-
-// selfCollector implements Collector for a single Metric so that the Metric
-// collects itself. Add it as an anonymous field to a struct that implements
-// Metric, and call init with the Metric itself as an argument.
-type selfCollector struct {
- self Metric
-}
-
-// init provides the selfCollector with a reference to the metric it is supposed
-// to collect. It is usually called within the factory function to create a
-// metric. See example.
-func (c *selfCollector) init(self Metric) {
- c.self = self
-}
-
-// Describe implements Collector.
-func (c *selfCollector) Describe(ch chan<- *Desc) {
- ch <- c.self.Desc()
-}
-
-// Collect implements Collector.
-func (c *selfCollector) Collect(ch chan<- Metric) {
- ch <- c.self
-}
-
-// collectorMetric is a metric that is also a collector.
-// Because of selfCollector, most (if not all) Metrics in
-// this package are also collectors.
-type collectorMetric interface {
- Metric
- Collector
-}
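
To illustrate the Collector contract and the `DescribeByCollect` shortcut documented above, here is a minimal sketch of a custom Collector; the queue metric and its length callback are hypothetical:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors an externally maintained queue length into a
// constant metric at scrape time (hypothetical example).
type queueCollector struct {
	desc *prometheus.Desc
	len  func() float64 // external source of the current queue length
}

// Describe uses the DescribeByCollect shortcut, which is safe here because
// this Collector always emits metrics for the same single Desc.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect creates the metric on the fly, as custom Collectors may.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.len())
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(&queueCollector{
		desc: prometheus.NewDesc("queue_length", "Current queue length.", nil, nil),
		len:  func() float64 { return 42 },
	})
}
```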
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
deleted file mode 100644
index 4ce84e7a80..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "math"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// Counter is a Metric that represents a single numerical value that only ever
-// goes up. That implies that it cannot be used to count items whose number can
-// also go down, e.g. the number of currently running goroutines. Those
-// "counters" are represented by Gauges.
-//
-// A Counter is typically used to count requests served, tasks completed, errors
-// occurred, etc.
-//
-// To create Counter instances, use NewCounter.
-type Counter interface {
- Metric
- Collector
-
- // Inc increments the counter by 1. Use Add to increment it by arbitrary
- // non-negative values.
- Inc()
- // Add adds the given value to the counter. It panics if the value is <
- // 0.
- Add(float64)
-}
-
-// ExemplarAdder is implemented by Counters that offer the option of adding a
-// value to the Counter together with an exemplar. Its AddWithExemplar method
-// works like the Add method of the Counter interface but also replaces the
-// currently saved exemplar (if any) with a new one, created from the provided
-// value, the current time as timestamp, and the provided labels. Empty Labels
-// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
-// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
-// of the provided labels are invalid, or if the provided labels contain more
-// than 128 runes in total.
-type ExemplarAdder interface {
- AddWithExemplar(value float64, exemplar Labels)
-}
-
-// CounterOpts is an alias for Opts. See there for doc comments.
-type CounterOpts Opts
-
-// CounterVecOpts bundles the options to create a CounterVec metric.
-// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
-// is optional and can safely be left to its default value.
-type CounterVecOpts struct {
- CounterOpts
-
- // VariableLabels are used to partition the metric vector by the given set
- // of labels. Each label value will be constrained with the optional Constraint
- // function, if provided.
- VariableLabels ConstrainableLabels
-}
-
-// NewCounter creates a new Counter based on the provided CounterOpts.
-//
-// The returned implementation also implements ExemplarAdder. It is safe to
-// perform the corresponding type assertion.
-//
-// The returned implementation tracks the counter value in two separate
-// variables, a float64 and a uint64. The latter is used to track calls of the
-// Inc method and calls of the Add method with a value that can be represented
-// as a uint64. This allows atomic increments of the counter with optimal
-// performance. (It is common to have an Inc call in very hot execution paths.)
-// Both internal tracking values are added up in the Write method. This has to
-// be taken into account when it comes to precision and overflow behavior.
-func NewCounter(opts CounterOpts) Counter {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- )
- if opts.now == nil {
- opts.now = time.Now
- }
- result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
- result.init(result) // Init self-collection.
- result.createdTs = timestamppb.New(opts.now())
- return result
-}
-
-type counter struct {
- // valBits contains the bits of the represented float64 value, while
- // valInt stores values that are exact integers. Both have to go first
- // in the struct to guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
- valInt uint64
-
- selfCollector
- desc *Desc
-
- createdTs *timestamppb.Timestamp
- labelPairs []*dto.LabelPair
- exemplar atomic.Value // Containing nil or a *dto.Exemplar.
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-}
-
-func (c *counter) Desc() *Desc {
- return c.desc
-}
-
-func (c *counter) Add(v float64) {
- if v < 0 {
- panic(errors.New("counter cannot decrease in value"))
- }
-
- ival := uint64(v)
- if float64(ival) == v {
- atomic.AddUint64(&c.valInt, ival)
- return
- }
-
- for {
- oldBits := atomic.LoadUint64(&c.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (c *counter) AddWithExemplar(v float64, e Labels) {
- c.Add(v)
- c.updateExemplar(v, e)
-}
-
-func (c *counter) Inc() {
- atomic.AddUint64(&c.valInt, 1)
-}
-
-func (c *counter) get() float64 {
- fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
- ival := atomic.LoadUint64(&c.valInt)
- return fval + float64(ival)
-}
-
-func (c *counter) Write(out *dto.Metric) error {
- // Read the Exemplar first and the value second. This is to avoid a race condition
- // where users see an exemplar for a not-yet-existing observation.
- var exemplar *dto.Exemplar
- if e := c.exemplar.Load(); e != nil {
- exemplar = e.(*dto.Exemplar)
- }
- val := c.get()
- return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
-}
-
-func (c *counter) updateExemplar(v float64, l Labels) {
- if l == nil {
- return
- }
- e, err := newExemplar(v, c.now(), l)
- if err != nil {
- panic(err)
- }
- c.exemplar.Store(e)
-}
-
-// CounterVec is a Collector that bundles a set of Counters that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. number of HTTP requests, partitioned by response code and
-// method). Create instances with NewCounterVec.
-type CounterVec struct {
- *MetricVec
-}
-
-// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names.
-func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- return V2.NewCounterVec(CounterVecOpts{
- CounterOpts: opts,
- VariableLabels: UnconstrainedLabels(labelNames),
- })
-}
-
-// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
-func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
- desc := V2.NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- opts.VariableLabels,
- opts.ConstLabels,
- )
- if opts.now == nil {
- opts.now = time.Now
- }
- return &CounterVec{
- MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels.names) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
- }
- result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
- result.init(result) // Init self-collection.
- result.createdTs = timestamppb.New(opts.now())
- return result
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Counter for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Counter is created.
-//
-// It is possible to call this method without using the returned Counter to only
-// create the new Counter but leave it at its starting value 0. See also the
-// SummaryVec example.
-//
-// Keeping the Counter for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Counter from the CounterVec. In that case,
-// the Counter will still exist, but it will not be exported anymore, even if a
-// Counter with the same label values is created later.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
- metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Counter), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Counter for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Counter is created. Implications of
-// creating a Counter without using it and keeping the Counter for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
- metric, err := v.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Counter), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
- c, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return c
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
-func (v *CounterVec) With(labels Labels) Counter {
- c, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return c
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the CounterVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
- vec, err := v.MetricVec.CurryWith(labels)
- if vec != nil {
- return &CounterVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
-
-// CounterFunc is a Counter whose value is determined at collect time by calling a
-// provided function.
-//
-// To create CounterFunc instances, use NewCounterFunc.
-type CounterFunc interface {
- Metric
- Collector
-}
-
-// NewCounterFunc creates a new CounterFunc based on the provided
-// CounterOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where a CounterFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe. The function should also honor
-// the contract for a Counter (values only go up, not down), but compliance will
-// not be checked.
-//
-// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
-func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), CounterValue, function)
-}
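
A minimal usage sketch of the deleted Counter API above — the fast `Inc` path, a `CounterVec` partitioned by labels, and the `ExemplarAdder` assertion that the `NewCounter` doc comment mentions; metric and label names are illustrative:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	reg.MustRegister(requests)

	// Fast path: Inc uses the atomic uint64 tracking variable described above.
	requests.WithLabelValues("200", "GET").Inc()

	// Exemplars: AddWithExemplar is reached via a type assertion to
	// ExemplarAdder, as documented for NewCounter.
	if ea, ok := requests.WithLabelValues("500", "POST").(prometheus.ExemplarAdder); ok {
		ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
	}
}
```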
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
deleted file mode 100644
index 68ffe3c248..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/cespare/xxhash/v2"
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/model"
- "google.golang.org/protobuf/proto"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-)
-
-// Desc is the descriptor used by every Prometheus Metric. It is essentially
-// the immutable meta-data of a Metric. The normal Metric implementations
-// included in this package manage their Desc under the hood. Users only have to
-// deal with Desc if they use advanced features like the ExpvarCollector or
-// custom Collectors and Metrics.
-//
-// Descriptors registered with the same registry have to fulfill certain
-// consistency and uniqueness criteria if they share the same fully-qualified
-// name: They must have the same help string and the same label names (aka label
-// dimensions) in each, constLabels and variableLabels, but they must differ in
-// the values of the constLabels.
-//
-// Descriptors that share the same fully-qualified names and the same label
-// values of their constLabels are considered equal.
-//
-// Use NewDesc to create new Desc instances.
-type Desc struct {
- // fqName has been built from Namespace, Subsystem, and Name.
- fqName string
- // help provides some helpful information about this metric.
- help string
- // constLabelPairs contains precalculated DTO label pairs based on
- // the constant labels.
- constLabelPairs []*dto.LabelPair
- // variableLabels contains names of labels and normalization function for
- // which the metric maintains variable values.
- variableLabels *compiledLabels
- // id is a hash of the values of the ConstLabels and fqName. This
- // must be unique among all registered descriptors and can therefore be
- // used as an identifier of the descriptor.
- id uint64
- // dimHash is a hash of the label names (preset and variable) and the
- // Help string. Each Desc with the same fqName must have the same
- // dimHash.
- dimHash uint64
- // err is an error that occurred during construction. It is reported on
- // registration time.
- err error
-}
-
-// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
-// and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName must not be empty.
-//
-// variableLabels only contain the label names. Their label values are variable
-// and therefore not part of the Desc. (They are managed within the Metric.)
-//
-// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Collector example for a usage pattern.
-func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
- return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
-}
-
-// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
-// and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName must not be empty.
-//
-// variableLabels only contain the label names and normalization functions. Their
-// label values are variable and therefore not part of the Desc. (They are managed
-// within the Metric.)
-//
-// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Collector example for a usage pattern.
-func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
- d := &Desc{
- fqName: fqName,
- help: help,
- variableLabels: variableLabels.compile(),
- }
- if !model.IsValidMetricName(model.LabelValue(fqName)) {
- d.err = fmt.Errorf("%q is not a valid metric name", fqName)
- return d
- }
- // labelValues contains the label values of const labels (in order of
- // their sorted label names) plus the fqName (at position 0).
- labelValues := make([]string, 1, len(constLabels)+1)
- labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
- labelNameSet := map[string]struct{}{}
- // First add only the const label names and sort them...
- for labelName := range constLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
- return d
- }
- labelNames = append(labelNames, labelName)
- labelNameSet[labelName] = struct{}{}
- }
- sort.Strings(labelNames)
- // ... so that we can now add const label values in the order of their names.
- for _, labelName := range labelNames {
- labelValues = append(labelValues, constLabels[labelName])
- }
- // Validate the const label values. They can't have a wrong cardinality, so
- // use len(labelValues) as expectedNumberOfValues.
- if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
- d.err = err
- return d
- }
- // Now add the variable label names, but prefix them with something that
- // cannot be in a regular label name. That prevents matching the label
- // dimension with a different mix between preset and variable labels.
- for _, label := range d.variableLabels.names {
- if !checkLabelName(label) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
- return d
- }
- labelNames = append(labelNames, "$"+label)
- labelNameSet[label] = struct{}{}
- }
- if len(labelNames) != len(labelNameSet) {
- d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
- return d
- }
-
- xxh := xxhash.New()
- for _, val := range labelValues {
- xxh.WriteString(val)
- xxh.Write(separatorByteSlice)
- }
- d.id = xxh.Sum64()
- // Sort labelNames so that order doesn't matter for the hash.
- sort.Strings(labelNames)
- // Now hash together (in this order) the help string and the sorted
- // label names.
- xxh.Reset()
- xxh.WriteString(help)
- xxh.Write(separatorByteSlice)
- for _, labelName := range labelNames {
- xxh.WriteString(labelName)
- xxh.Write(separatorByteSlice)
- }
- d.dimHash = xxh.Sum64()
-
- d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
- for n, v := range constLabels {
- d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
- Name: proto.String(n),
- Value: proto.String(v),
- })
- }
- sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
- return d
-}
-
-// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
-// provided error set. If a collector returning such a descriptor is registered,
-// registration will fail with the provided error. NewInvalidDesc can be used by
-// a Collector to signal inability to describe itself.
-func NewInvalidDesc(err error) *Desc {
- return &Desc{
- err: err,
- }
-}
-
-func (d *Desc) String() string {
- lpStrings := make([]string, 0, len(d.constLabelPairs))
- for _, lp := range d.constLabelPairs {
- lpStrings = append(
- lpStrings,
- fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
- )
- }
- vlStrings := make([]string, 0, len(d.variableLabels.names))
- for _, vl := range d.variableLabels.names {
- if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
- vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
- } else {
- vlStrings = append(vlStrings, vl)
- }
- }
- return fmt.Sprintf(
- "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
- d.fqName,
- d.help,
- strings.Join(lpStrings, ","),
- strings.Join(vlStrings, ","),
- )
-}
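
A minimal sketch of constructing a `Desc` directly, as the custom-Collector use case above requires; names and label values are illustrative:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// fqName must be a valid metric name; const label values are fixed in
	// the Desc, while variable label values are supplied per metric.
	desc := prometheus.NewDesc(
		"worker_pool_size",
		"Number of workers in the pool.",
		[]string{"pool"},                  // variable labels
		prometheus.Labels{"region": "eu"}, // const labels
	)
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 8, "default")
	_ = m
}
```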
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
deleted file mode 100644
index 962608f02c..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package prometheus is the core instrumentation package. It provides metrics
-// primitives to instrument code for monitoring. It also offers a registry for
-// metrics. Sub-packages allow exposing the registered metrics via HTTP
-// (package promhttp) or push them to a Pushgateway (package push). There is
-// also a sub-package promauto, which provides metrics constructors with
-// automatic registration.
-//
-// All exported functions and methods are safe to be used concurrently unless
-// specified otherwise.
-//
-// # A Basic Example
-//
-// As a starting point, a very basic usage example:
-//
-// package main
-//
-// import (
-// "log"
-// "net/http"
-//
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
-//
-// type metrics struct {
-// cpuTemp prometheus.Gauge
-// hdFailures *prometheus.CounterVec
-// }
-//
-// func NewMetrics(reg prometheus.Registerer) *metrics {
-// m := &metrics{
-// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// }),
-// hdFailures: prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// ),
-// }
-// reg.MustRegister(m.cpuTemp)
-// reg.MustRegister(m.hdFailures)
-// return m
-// }
-//
-// func main() {
-// // Create a non-global registry.
-// reg := prometheus.NewRegistry()
-//
-// // Create new metrics and register them using the custom registry.
-// m := NewMetrics(reg)
-// // Set values for the newly created metrics.
-// m.cpuTemp.Set(65.3)
-// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
-//
-// // Expose metrics and custom registry via an HTTP server
-// // using the HandlerFor function. "/metrics" is the usual endpoint for that.
-// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
-// log.Fatal(http.ListenAndServe(":8080", nil))
-// }
-//
-// This is a complete program that exports two metrics, a Gauge and a Counter,
-// the latter with a label attached to turn it into a (one-dimensional) vector.
-// It registers the metrics using a custom registry and exposes them via an HTTP server
-// on the /metrics endpoint.
-//
-// # Metrics
-//
-// The number of exported identifiers in this package might appear a bit
-// overwhelming. However, in addition to the basic plumbing shown in the example
-// above, you only need to understand the different metric types and their
-// vector versions for basic usage. Furthermore, if you are not concerned with
-// fine-grained control of when and how to register metrics with the registry,
-// have a look at the promauto package, which will effectively allow you to
-// ignore registration altogether in simple cases.
-//
-// Above, you have already touched the Counter and the Gauge. There are two more
-// advanced metric types: the Summary and Histogram. A more thorough description
-// of those four metric types can be found in the Prometheus docs:
-// https://prometheus.io/docs/concepts/metric_types/
-//
-// In addition to the fundamental metric types Gauge, Counter, Summary, and
-// Histogram, a very important part of the Prometheus data model is the
-// partitioning of samples along dimensions called labels, which results in
-// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec.
-//
-// While only the fundamental metric types implement the Metric interface, both
-// the metrics and their vector versions implement the Collector interface. A
-// Collector manages the collection of a number of Metrics, but for convenience,
-// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
-// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec are not.
-//
-// To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
-//
-// # Custom Collectors and constant Metrics
-//
-// While you could create your own implementations of Metric, most likely you
-// will only ever implement the Collector interface on your own. At a first
-// glance, a custom Collector seems handy to bundle Metrics for common
-// registration (with the prime example of the different metric vectors above,
-// which bundle all the metrics of the same name but with different labels).
-//
-// There is a more involved use case, too: If you already have metrics
-// available, created outside of the Prometheus context, you don't need the
-// interface of the various Metric types. You essentially want to mirror the
-// existing numbers into Prometheus Metrics during collection. Your own
-// implementation of the Collector interface is perfect for that. You can create
-// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
-// NewConstSummary (and their respective Must… versions). NewConstMetric is used
-// for all metric types with just a float64 as their value: Counter, Gauge, and
-// a special “type” called Untyped. Use the latter if you are not sure if the
-// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
-// happens in the Collect method. The Describe method has to return separate
-// Desc instances, representative of the “throw-away” metrics to be created
-// later. NewDesc comes in handy to create those Desc instances. Alternatively,
-// you could return no Desc at all, which will mark the Collector “unchecked”.
-// No checks are performed at registration time, but metric consistency will
-// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
-// errors. Thus, with unchecked Collectors, the responsibility to not collect
-// metrics that lead to inconsistencies in the total scrape result lies with the
-// implementer of the Collector. While this is not a desirable state, it is
-// sometimes necessary. The typical use case is a situation where the exact
-// metrics to be returned by a Collector cannot be predicted at registration
-// time, but the implementer has sufficient knowledge of the whole system to
-// guarantee metric consistency.
-//
-// The Collector example illustrates the use case. You can also look at the
-// source code of the processCollector (mirroring process metrics), the
-// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
-// metrics) as examples that are used in this package itself.
-//
-// If you just need to call a function to get a single float value to collect as
-// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
-// shortcuts.
-//
-// # Advanced Uses of the Registry
-//
-// While MustRegister is the by far most common way of registering a Collector,
-// sometimes you might want to handle the errors the registration might cause.
-// As suggested by the name, MustRegister panics if an error occurs. With the
-// Register function, the error is returned and can be handled.
-//
-// An error is returned if the registered Collector is incompatible or
-// inconsistent with already registered metrics. The registry aims for
-// consistency of the collected metrics according to the Prometheus data model.
-// Inconsistencies are ideally detected at registration time, not at collect
-// time. The former will usually be detected at start-up time of a program,
-// while the latter will only happen at scrape time, possibly not even on the
-// first scrape if the inconsistency only becomes relevant later. That is the
-// main reason why a Collector and a Metric have to describe themselves to the
-// registry.
-//
-// So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegisterer variable. With NewRegistry, you
-// can create a custom registry, or you can even implement the Registerer or
-// Gatherer interfaces yourself. The methods Register and Unregister work in the
-// same way on a custom registry as the global functions Register and Unregister
-// on the default registry.
-//
-// There are a number of uses for custom registries: You can use registries with
-// special properties, see NewPedanticRegistry. You can avoid global state, as
-// it is imposed by the DefaultRegisterer. You can use multiple registries at
-// the same time to expose different metrics in different ways. You can use
-// separate registries for testing purposes.
-//
-// Also note that the DefaultRegisterer comes registered with a Collector for Go
-// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
-// NewProcessCollector). With a custom registry, you are in control and decide
-// yourself about the Collectors to register.
-//
-// # HTTP Exposition
-//
-// The Registry implements the Gatherer interface. The caller of the Gather
-// method can then expose the gathered metrics in some way. Usually, the metrics
-// are served via HTTP on the /metrics endpoint. That's happening in the example
-// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
-//
-// # Pushing to the Pushgateway
-//
-// Functions for pushing to the Pushgateway can be found in the push sub-package.
-//
-// # Graphite Bridge
-//
-// Functions and examples to push metrics from a Gatherer to Graphite can be
-// found in the graphite sub-package.
-//
-// # Other Means of Exposition
-//
-// More ways of exposing metrics can easily be added by following the approaches
-// of the existing implementations.
-package prometheus
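
As a complement to the "Advanced Uses of the Registry" section above, a minimal sketch of handling the error that `Register` returns where `MustRegister` would panic, using the package's `AlreadyRegisteredError`:

```go
package main

import (
	"errors"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_total", Help: "Processed jobs.",
	})
	if err := reg.Register(c); err != nil {
		var are prometheus.AlreadyRegisteredError
		if errors.As(err, &are) {
			// Reuse the collector that was registered first.
			c = are.ExistingCollector.(prometheus.Counter)
		} else {
			log.Fatal(err)
		}
	}
	c.Inc()
}
```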
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
deleted file mode 100644
index de5a856293..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "encoding/json"
- "expvar"
-)
-
-type expvarCollector struct {
- exports map[string]*Desc
-}
-
-// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewExpvarCollector instead.
-func NewExpvarCollector(exports map[string]*Desc) Collector {
- return &expvarCollector{
- exports: exports,
- }
-}
-
-// Describe implements Collector.
-func (e *expvarCollector) Describe(ch chan<- *Desc) {
- for _, desc := range e.exports {
- ch <- desc
- }
-}
-
-// Collect implements Collector.
-func (e *expvarCollector) Collect(ch chan<- Metric) {
- for name, desc := range e.exports {
- var m Metric
- expVar := expvar.Get(name)
- if expVar == nil {
- continue
- }
- var v interface{}
- labels := make([]string, len(desc.variableLabels.names))
- if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
- ch <- NewInvalidMetric(desc, err)
- continue
- }
- var processValue func(v interface{}, i int)
- processValue = func(v interface{}, i int) {
- if i >= len(labels) {
- copiedLabels := append(make([]string, 0, len(labels)), labels...)
- switch v := v.(type) {
- case float64:
- m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
- case bool:
- if v {
- m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
- } else {
- m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
- }
- default:
- return
- }
- ch <- m
- return
- }
- vm, ok := v.(map[string]interface{})
- if !ok {
- return
- }
- for lv, val := range vm {
- labels[i] = lv
- processValue(val, i+1)
- }
- }
- processValue(v, 0)
- }
-}
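
A minimal sketch of the non-deprecated entry point for the expvar bridge above; the expvar variable and metric names are illustrative:

```go
package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	// A hypothetical expvar variable published elsewhere in the program.
	hits := expvar.NewInt("cache_hits")
	hits.Add(42)

	reg := prometheus.NewRegistry()
	// Map expvar names to Descs; nested JSON maps become label values,
	// one level per variable label, as in processValue above.
	reg.MustRegister(collectors.NewExpvarCollector(map[string]*prometheus.Desc{
		"cache_hits": prometheus.NewDesc(
			"cache_hits_total", "Cache hits recorded via expvar.", nil, nil),
	}))
}
```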
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
deleted file mode 100644
index 3d383a735c..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
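
The helpers above are an inline, allocation-free FNV-1a; a short sketch checking them against the standard library's hash/fnv, which uses the same offset basis and prime:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashAdd mirrors the deleted helper: XOR each byte, then multiply (FNV-1a).
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	h := hashAdd(offset64, "metric_name")

	std := fnv.New64a()
	std.Write([]byte("metric_name"))

	fmt.Println(h == std.Sum64()) // true: both compute fnv64a
}
```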
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
deleted file mode 100644
index dd2eac9406..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "math"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Gauge is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// A Gauge is typically used for measured values like temperatures or current
-// memory usage, but also "counts" that can go up and down, like the number of
-// running goroutines.
-//
-// To create Gauge instances, use NewGauge.
-type Gauge interface {
- Metric
- Collector
-
- // Set sets the Gauge to an arbitrary value.
- Set(float64)
- // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
- // values.
- Inc()
- // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
- // values.
- Dec()
- // Add adds the given value to the Gauge. (The value can be negative,
- // resulting in a decrease of the Gauge.)
- Add(float64)
- // Sub subtracts the given value from the Gauge. (The value can be
- // negative, resulting in an increase of the Gauge.)
- Sub(float64)
-
- // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
- SetToCurrentTime()
-}
-
-// GaugeOpts is an alias for Opts. See there for doc comments.
-type GaugeOpts Opts
-
-// GaugeVecOpts bundles the options to create a GaugeVec metric.
-// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
-// is optional and can safely be left to its default value.
-type GaugeVecOpts struct {
- GaugeOpts
-
- // VariableLabels are used to partition the metric vector by the given set
- // of labels. Each label value will be constrained with the optional Constraint
- // function, if provided.
- VariableLabels ConstrainableLabels
-}
-
-// NewGauge creates a new Gauge based on the provided GaugeOpts.
-//
-// The returned implementation is optimized for a fast Set method. If you have a
-// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
-// the former. For example, the Inc method of the returned Gauge is slower than
-// the Inc method of a Counter returned by NewCounter. This matches the typical
-// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
-// the latter Inc-heavy.
-func NewGauge(opts GaugeOpts) Gauge {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- )
- result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
- result.init(result) // Init self-collection.
- return result
-}
-
-type gauge struct {
- // valBits contains the bits of the represented float64 value. It has
- // to go first in the struct to guarantee alignment for atomic
- // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
-
- selfCollector
-
- desc *Desc
- labelPairs []*dto.LabelPair
-}
-
-func (g *gauge) Desc() *Desc {
- return g.desc
-}
-
-func (g *gauge) Set(val float64) {
- atomic.StoreUint64(&g.valBits, math.Float64bits(val))
-}
-
-func (g *gauge) SetToCurrentTime() {
- g.Set(float64(time.Now().UnixNano()) / 1e9)
-}
-
-func (g *gauge) Inc() {
- g.Add(1)
-}
-
-func (g *gauge) Dec() {
- g.Add(-1)
-}
-
-func (g *gauge) Add(val float64) {
- for {
- oldBits := atomic.LoadUint64(&g.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
- if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (g *gauge) Sub(val float64) {
- g.Add(val * -1)
-}
-
-func (g *gauge) Write(out *dto.Metric) error {
- val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
- return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
-}
-
-// GaugeVec is a Collector that bundles a set of Gauges that all share the same
-// Desc, but have different values for their variable labels. This is used if
-// you want to count the same thing partitioned by various dimensions
-// (e.g. number of operations queued, partitioned by user and operation
-// type). Create instances with NewGaugeVec.
-type GaugeVec struct {
- *MetricVec
-}
-
-// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names.
-func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- return V2.NewGaugeVec(GaugeVecOpts{
- GaugeOpts: opts,
- VariableLabels: UnconstrainedLabels(labelNames),
- })
-}
-
-// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
-func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
- desc := V2.NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- opts.VariableLabels,
- opts.ConstLabels,
- )
- return &GaugeVec{
- MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels.names) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
- }
- result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
- result.init(result) // Init self-collection.
- return result
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Gauge for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Gauge is created.
-//
-// It is possible to call this method without using the returned Gauge to only
-// create the new Gauge but leave it at its starting value 0. See also the
-// SummaryVec example.
-//
-// Keeping the Gauge for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
-// Gauge will still exist, but it will not be exported anymore, even if a
-// Gauge with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
- metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Gauge for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Gauge is created. Implications of
-// creating a Gauge without using it and keeping the Gauge for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
- metric, err := v.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
- g, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return g
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
-func (v *GaugeVec) With(labels Labels) Gauge {
- g, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return g
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the GaugeVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
- vec, err := v.MetricVec.CurryWith(labels)
- if vec != nil {
- return &GaugeVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
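-
-// A hedged currying sketch for the methods above (label names invented for
-// illustration). Given a vector vec with variable labels {"code", "method"}:
-//
-//	get := vec.MustCurryWith(Labels{"method": "GET"})
-//	get.WithLabelValues("404").Inc()
-//	// Equivalent to vec.WithLabelValues("404", "GET").Inc()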
-
-// GaugeFunc is a Gauge whose value is determined at collect time by calling a
-// provided function.
-//
-// To create GaugeFunc instances, use NewGaugeFunc.
-type GaugeFunc interface {
- Metric
- Collector
-}
-
-// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
-// value reported is determined by calling the given function from within the
-// Write method. Take into account that metric collection may happen
-// concurrently. Therefore, it must be safe to call the provided function
-// concurrently.
-//
-// NewGaugeFunc is a good way to create an “info” style metric with a constant
-// value of 1. Example:
-// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), GaugeValue, function)
-}
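-
-// A hedged sketch of an “info” style GaugeFunc as described above (the metric
-// name and const label value are invented for illustration):
-//
-//	NewGaugeFunc(GaugeOpts{
-//		Name:        "build_info",
-//		Help:        "Build information.",
-//		ConstLabels: Labels{"version": "v1.2.3"},
-//	}, func() float64 { return 1 })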
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
deleted file mode 100644
index 614fd61be9..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !js || wasm
-// +build !js wasm
-
-package prometheus
-
-import "os"
-
-func getPIDFn() func() (int, error) {
- pid := os.Getpid()
- return func() (int, error) {
- return pid, nil
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
deleted file mode 100644
index eaf8059ee1..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build js && !wasm
-// +build js,!wasm
-
-package prometheus
-
-func getPIDFn() func() (int, error) {
- return func() (int, error) {
- return 1, nil
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
deleted file mode 100644
index ad9a71a5e0..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "runtime"
- "runtime/debug"
- "time"
-)
-
-// goRuntimeMemStats provides the metrics originally provided by runtime.ReadMemStats.
-// From Go 1.17, similar (and better) statistics are provided by runtime/metrics, so
-// while each eval closure still works on runtime.MemStats, on Go 1.17+ that struct
-// is populated using runtime/metrics.
-func goRuntimeMemStats() memStatsMetrics {
- return memStatsMetrics{
- {
- desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("frees_total"),
- "Total number of frees.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of allocated objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
- },
- }
-}
-
-type baseGoCollector struct {
- goroutinesDesc *Desc
- threadsDesc *Desc
- gcDesc *Desc
- gcLastTimeDesc *Desc
- goInfoDesc *Desc
-}
-
-func newBaseGoCollector() baseGoCollector {
- return baseGoCollector{
- goroutinesDesc: NewDesc(
- "go_goroutines",
- "Number of goroutines that currently exist.",
- nil, nil),
- threadsDesc: NewDesc(
- "go_threads",
- "Number of OS threads created.",
- nil, nil),
- gcDesc: NewDesc(
- "go_gc_duration_seconds",
- "A summary of the pause duration of garbage collection cycles.",
- nil, nil),
- gcLastTimeDesc: NewDesc(
- "go_memstats_last_gc_time_seconds",
- "Number of seconds since 1970 of last garbage collection.",
- nil, nil),
- goInfoDesc: NewDesc(
- "go_info",
- "Information about the Go environment.",
- nil, Labels{"version": runtime.Version()}),
- }
-}
-
-// Describe returns all descriptions of the collector.
-func (c *baseGoCollector) Describe(ch chan<- *Desc) {
- ch <- c.goroutinesDesc
- ch <- c.threadsDesc
- ch <- c.gcDesc
- ch <- c.gcLastTimeDesc
- ch <- c.goInfoDesc
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *baseGoCollector) Collect(ch chan<- Metric) {
- ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
-
- n := getRuntimeNumThreads()
- ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
-
- var stats debug.GCStats
- stats.PauseQuantiles = make([]time.Duration, 5)
- debug.ReadGCStats(&stats)
-
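-	// stats.PauseQuantiles has 5 entries: index 0 holds the minimum (quantile
-	// 0.0) and indexes 1 through 4 map to quantiles 0.25, 0.5, 0.75, and 1.0.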
- quantiles := make(map[float64]float64)
- for idx, pq := range stats.PauseQuantiles[1:] {
- quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
- }
- quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
- ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
- ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
- ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-}
-
-func memstatNamespace(s string) string {
- return "go_memstats_" + s
-}
-
-// memStatsMetrics provide description, evaluator, and value type for
-// memstat metrics.
-type memStatsMetrics []struct {
- desc *Desc
- eval func(*runtime.MemStats) float64
- valType ValueType
-}
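-
-// A hedged sketch of how the entries above are consumed at collect time
-// (mirroring how the collectors below evaluate them):
-//
-//	var ms runtime.MemStats
-//	runtime.ReadMemStats(&ms)
-//	for _, m := range goRuntimeMemStats() {
-//		metric := MustNewConstMetric(m.desc, m.valType, m.eval(&ms))
-//		_ = metric // sent on the Collect channel in real use
-//	}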
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
deleted file mode 100644
index 897a6e906b..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.17
-// +build !go1.17
-
-package prometheus
-
-import (
- "runtime"
- "sync"
- "time"
-)
-
-type goCollector struct {
- base baseGoCollector
-
- // ms... are memstats related.
- msLast *runtime.MemStats // Previously collected memstats.
- msLastTimestamp time.Time
- msMtx sync.Mutex // Protects msLast and msLastTimestamp.
- msMetrics memStatsMetrics
- msRead func(*runtime.MemStats) // For mocking in tests.
- msMaxWait time.Duration // Wait time for fresh memstats.
- msMaxAge time.Duration // Maximum allowed age of old memstats.
-}
-
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
- msMetrics := goRuntimeMemStats()
- msMetrics = append(msMetrics, struct {
- desc *Desc
- eval func(*runtime.MemStats) float64
- valType ValueType
- }{
- // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
- desc: NewDesc(
- memstatNamespace("gc_cpu_fraction"),
- "The fraction of this program's available CPU time used by the GC since the program started.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
- valType: GaugeValue,
- })
- return &goCollector{
- base: newBaseGoCollector(),
- msLast: &runtime.MemStats{},
- msRead: runtime.ReadMemStats,
- msMaxWait: time.Second,
- msMaxAge: 5 * time.Minute,
- msMetrics: msMetrics,
- }
-}
-
-// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
- c.base.Describe(ch)
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- var (
- ms = &runtime.MemStats{}
- done = make(chan struct{})
- )
- // Start reading memstats first as it might take a while.
- go func() {
- c.msRead(ms)
- c.msMtx.Lock()
- c.msLast = ms
- c.msLastTimestamp = time.Now()
- c.msMtx.Unlock()
- close(done)
- }()
-
- // Collect base non-memory metrics.
- c.base.Collect(ch)
-
- timer := time.NewTimer(c.msMaxWait)
- select {
- case <-done: // Our own ReadMemStats succeeded in time. Use it.
- timer.Stop() // Important for high collection frequencies to not pile up timers.
- c.msCollect(ch, ms)
- return
- case <-timer.C: // Time out, use last memstats if possible. Continue below.
- }
- c.msMtx.Lock()
- if time.Since(c.msLastTimestamp) < c.msMaxAge {
- // Last memstats are recent enough. Collect from them under the lock.
- c.msCollect(ch, c.msLast)
- c.msMtx.Unlock()
- return
- }
- // If we are here, the last memstats are too old or don't exist. We have
- // to wait until our own ReadMemStats finally completes. For that to
- // happen, we have to release the lock.
- c.msMtx.Unlock()
- <-done
- c.msCollect(ch, ms)
-}
-
-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
deleted file mode 100644
index 2d8d9f64f4..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
+++ /dev/null
@@ -1,567 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.17
-// +build go1.17
-
-package prometheus
-
-import (
- "math"
- "runtime"
- "runtime/metrics"
- "strings"
- "sync"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
-)
-
-const (
- // constants for strings referenced more than once.
- goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
- goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
- goGCHeapFreesObjects = "/gc/heap/frees:objects"
- goGCHeapFreesBytes = "/gc/heap/frees:bytes"
- goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
- goGCHeapObjects = "/gc/heap/objects:objects"
- goGCHeapGoalBytes = "/gc/heap/goal:bytes"
- goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
- goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
- goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
- goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
- goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
- goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
- goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
- goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
- goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
- goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
- goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
- goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
- goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
- goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
-)
-
-// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate the goRuntimeMemStats-like metrics.
-var rmNamesForMemStatsMetrics = []string{
- goGCHeapTinyAllocsObjects,
- goGCHeapAllocsObjects,
- goGCHeapFreesObjects,
- goGCHeapAllocsBytes,
- goGCHeapObjects,
- goGCHeapGoalBytes,
- goMemoryClassesTotalBytes,
- goMemoryClassesHeapObjectsBytes,
- goMemoryClassesHeapUnusedBytes,
- goMemoryClassesHeapReleasedBytes,
- goMemoryClassesHeapFreeBytes,
- goMemoryClassesHeapStacksBytes,
- goMemoryClassesOSStacksBytes,
- goMemoryClassesMetadataMSpanInuseBytes,
- goMemoryClassesMetadataMSPanFreeBytes,
- goMemoryClassesMetadataMCacheInuseBytes,
- goMemoryClassesMetadataMCacheFreeBytes,
- goMemoryClassesProfilingBucketsBytes,
- goMemoryClassesMetadataOtherBytes,
- goMemoryClassesOtherBytes,
-}
-
-func bestEffortLookupRM(lookup []string) []metrics.Description {
- ret := make([]metrics.Description, 0, len(lookup))
- for _, rm := range metrics.All() {
- for _, m := range lookup {
- if m == rm.Name {
- ret = append(ret, rm)
- }
- }
- }
- return ret
-}
-
-type goCollector struct {
- base baseGoCollector
-
- // mu protects updates to all fields ensuring a consistent
- // snapshot is always produced by Collect.
- mu sync.Mutex
-
-	// sampleBuf contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
- sampleBuf []metrics.Sample
- // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
- sampleMap map[string]*metrics.Sample
-
- // rmExposedMetrics represents all runtime/metrics package metrics
- // that were configured to be exposed.
- rmExposedMetrics []collectorMetric
- rmExactSumMapForHist map[string]string
-
- // With Go 1.17, the runtime/metrics package was introduced.
-	// From that point on, the exposed metric names could be generated
-	// directly from the runtime/metrics names. However, these differ from
-	// the old names for the same values.
- //
- // This field exists to export the same values under the old names
- // as well.
- msMetrics memStatsMetrics
- msMetricsEnabled bool
-}
-
-type rmMetricDesc struct {
- metrics.Description
-}
-
-func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
- var descs []rmMetricDesc
- for _, d := range metrics.All() {
- var (
- deny = true
- desc rmMetricDesc
- )
-
- for _, r := range rules {
- if !r.Matcher.MatchString(d.Name) {
- continue
- }
- deny = r.Deny
- }
- if deny {
- continue
- }
-
- desc.Description = d
- descs = append(descs, desc)
- }
- return descs
-}
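-
-// A hedged illustration of the rule semantics above: rules are applied in
-// order and the last matching rule wins, with metrics matching no rule
-// dropped. A broad allow followed by a narrower deny therefore exposes
-// everything under /gc/ except the tiny-allocation metrics:
-//
-//	rules := []internal.GoCollectorRule{
-//		{Matcher: regexp.MustCompile("^/gc/")},
-//		{Matcher: regexp.MustCompile("^/gc/heap/tiny/"), Deny: true},
-//	}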
-
-func defaultGoCollectorOptions() internal.GoCollectorOptions {
- return internal.GoCollectorOptions{
- RuntimeMetricSumForHist: map[string]string{
- "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
- "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
- },
- RuntimeMetricRules: []internal.GoCollectorRule{
- //{Matcher: regexp.MustCompile("")},
- },
- }
-}
-
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
- opt := defaultGoCollectorOptions()
- for _, o := range opts {
- o(&opt)
- }
-
- exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
-
- // Collect all histogram samples so that we can get their buckets.
- // The API guarantees that the buckets are always fixed for the lifetime
- // of the process.
- var histograms []metrics.Sample
- for _, d := range exposedDescriptions {
- if d.Kind == metrics.KindFloat64Histogram {
- histograms = append(histograms, metrics.Sample{Name: d.Name})
- }
- }
-
- if len(histograms) > 0 {
- metrics.Read(histograms)
- }
-
- bucketsMap := make(map[string][]float64)
- for i := range histograms {
- bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
- }
-
- // Generate a collector for each exposed runtime/metrics metric.
- metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
-	// sampleBuf is used for reading from runtime/metrics. It is sized for the
-	// largest case up front so that appends never reallocate and the pointers
-	// stored in sampleMap stay stable.
- sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
- sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
- for _, d := range exposedDescriptions {
- namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
- if !ok {
- // Just ignore this metric; we can't do anything with it here.
- // If a user decides to use the latest version of Go, we don't want
- // to fail here. This condition is tested in TestExpectedRuntimeMetrics.
- continue
- }
-
- sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
- sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
-
- var m collectorMetric
- if d.Kind == metrics.KindFloat64Histogram {
- _, hasSum := opt.RuntimeMetricSumForHist[d.Name]
- unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
- m = newBatchHistogram(
- NewDesc(
- BuildFQName(namespace, subsystem, name),
- d.Description.Description,
- nil,
- nil,
- ),
- internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
- hasSum,
- )
- } else if d.Cumulative {
- m = NewCounter(CounterOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: name,
- Help: d.Description.Description,
- },
- )
- } else {
- m = NewGauge(GaugeOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: name,
- Help: d.Description.Description,
- })
- }
- metricSet = append(metricSet, m)
- }
-
- // Add exact sum metrics to sampleBuf if not added before.
- for _, h := range histograms {
- sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
- if !ok {
- continue
- }
-
- if _, ok := sampleMap[sumMetric]; ok {
- continue
- }
- sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
- sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
- }
-
- var (
- msMetrics memStatsMetrics
- msDescriptions []metrics.Description
- )
-
- if !opt.DisableMemStatsLikeMetrics {
- msMetrics = goRuntimeMemStats()
- msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
-
- // Check if metric was not exposed before and if not, add to sampleBuf.
- for _, mdDesc := range msDescriptions {
- if _, ok := sampleMap[mdDesc.Name]; ok {
- continue
- }
- sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
- sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
- }
- }
-
- return &goCollector{
- base: newBaseGoCollector(),
- sampleBuf: sampleBuf,
- sampleMap: sampleMap,
- rmExposedMetrics: metricSet,
- rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
- msMetrics: msMetrics,
- msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
- }
-}
-
-// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
- c.base.Describe(ch)
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
- for _, m := range c.rmExposedMetrics {
- ch <- m.Desc()
- }
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- // Collect base non-memory metrics.
- c.base.Collect(ch)
-
- if len(c.sampleBuf) == 0 {
- return
- }
-
- // Collect must be thread-safe, so prevent concurrent use of
- // sampleBuf elements. Just read into sampleBuf but write all the data
- // we get into our Metrics or MemStats.
- //
- // This lock also ensures that the Metrics we send out are all from
- // the same updates, ensuring their mutual consistency insofar as
- // is guaranteed by the runtime/metrics package.
- //
- // N.B. This locking is heavy-handed, but Collect is expected to be called
- // relatively infrequently. Also the core operation here, metrics.Read,
- // is fast (O(tens of microseconds)) so contention should certainly be
- // low, though channel operations and any allocations may add to that.
- c.mu.Lock()
- defer c.mu.Unlock()
-
- // Populate runtime/metrics sample buffer.
- metrics.Read(c.sampleBuf)
-
- // Collect all our runtime/metrics user chose to expose from sampleBuf (if any).
- for i, metric := range c.rmExposedMetrics {
- // We created samples for exposed metrics first in order, so indexes match.
- sample := c.sampleBuf[i]
-
- // N.B. switch on concrete type because it's significantly more efficient
- // than checking for the Counter and Gauge interface implementations. In
- // this case, we control all the types here.
- switch m := metric.(type) {
- case *counter:
- // Guard against decreases. This should never happen, but a failure
- // to do so will result in a panic, which is a harsh consequence for
- // a metrics collection bug.
- v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
- if v1 > v0 {
- m.Add(unwrapScalarRMValue(sample.Value) - m.get())
- }
- m.Collect(ch)
- case *gauge:
- m.Set(unwrapScalarRMValue(sample.Value))
- m.Collect(ch)
- case *batchHistogram:
- m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
- m.Collect(ch)
- default:
- panic("unexpected metric type")
- }
- }
-
- if c.msMetricsEnabled {
-		// ms is a dummy MemStats that we populate ourselves so that we can
-		// populate the old metrics from it when msMetricsEnabled is set.
- var ms runtime.MemStats
- memStatsFromRM(&ms, c.sampleMap)
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
- }
- }
-}
-
-// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
-// to be scalar and returns the equivalent float64 value. Panics if the
-// value is not scalar.
-func unwrapScalarRMValue(v metrics.Value) float64 {
- switch v.Kind() {
- case metrics.KindUint64:
- return float64(v.Uint64())
- case metrics.KindFloat64:
- return v.Float64()
- case metrics.KindBad:
- // Unsupported metric.
- //
- // This should never happen because we always populate our metric
- // set from the runtime/metrics package.
- panic("unexpected unsupported metric")
- default:
- // Unsupported metric kind.
- //
- // This should never happen because we check for this during initialization
- // and flag and filter metrics whose kinds we don't understand.
- panic("unexpected unsupported metric kind")
- }
-}
-
-// exactSumFor takes a runtime/metrics metric name (that is assumed to
-// be of kind KindFloat64Histogram) and returns its exact sum, or 0 if
-// no exact sum is available for it.
-//
-// The runtime/metrics API for histograms doesn't currently expose exact
-// sums, but some of the other metrics are in fact exact sums of histograms.
-func (c *goCollector) exactSumFor(rmName string) float64 {
- sumName, ok := c.rmExactSumMapForHist[rmName]
- if !ok {
- return 0
- }
- s, ok := c.sampleMap[sumName]
- if !ok {
- return 0
- }
- return unwrapScalarRMValue(s.Value)
-}
-
-func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
- lookupOrZero := func(name string) uint64 {
- if s, ok := rm[name]; ok {
- return s.Value.Uint64()
- }
- return 0
- }
-
-	// Currently, MemStats adds the tiny alloc count to both Mallocs AND Frees.
-	// The reason is that MemStats couldn't be extended at the time, but there
-	// was a desire to have Mallocs at least be a little more representative,
-	// while having Mallocs - Frees still represent a live object count.
- // Unfortunately, MemStats doesn't actually export a large allocation count,
- // so it's impossible to pull this number out directly.
- tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
- ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
- ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
-
- ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
- ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
- ms.Lookups = 0 // Already always zero.
- ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
- ms.Alloc = ms.HeapAlloc
- ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
- ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
- ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
- ms.HeapSys = ms.HeapInuse + ms.HeapIdle
- ms.HeapObjects = lookupOrZero(goGCHeapObjects)
- ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
- ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
- ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
- ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
- ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
- ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
- ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
- ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
- ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
- ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
-
- // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
- // and often misleading due to the fact that it's an average over the lifetime
- // of the process.
- // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
- // for more details.
- ms.GCCPUFraction = 0
-}
-
-// batchHistogram is a mutable histogram that is updated
-// in batches.
-type batchHistogram struct {
- selfCollector
-
- // Static fields updated only once.
- desc *Desc
- hasSum bool
-
-	// Because this histogram operates in batches, it just uses a
-	// single mutex for everything. Updates are always serialized,
-	// but Write calls may operate concurrently with updates.
-	// Contention between these two sources should be rare.
- mu sync.Mutex
- buckets []float64 // Inclusive lower bounds, like runtime/metrics.
- counts []uint64
- sum float64 // Used if hasSum is true.
-}
-
-// newBatchHistogram creates a new batch histogram value with the given
-// Desc, buckets, and whether or not it has an exact sum available.
-//
-// buckets must always be from the runtime/metrics package, following
-// the same conventions.
-func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
-	// We need to remove -Inf values. runtime/metrics keeps them around,
-	// but a -Inf bucket is not allowed for Prometheus histograms.
- if buckets[0] == math.Inf(-1) {
- buckets = buckets[1:]
- }
- h := &batchHistogram{
- desc: desc,
- buckets: buckets,
- // Because buckets follows runtime/metrics conventions, there's
- // 1 more value in the buckets list than there are buckets represented,
- // because in runtime/metrics, the bucket values represent *boundaries*,
- // and non-Inf boundaries are inclusive lower bounds for that bucket.
- counts: make([]uint64, len(buckets)-1),
- hasSum: hasSum,
- }
- h.init(h)
- return h
-}
-
-// update updates the batchHistogram from a runtime/metrics histogram.
-//
-// sum must be provided if the batchHistogram was created to have an exact sum.
-// h.buckets must be a strict subset of his.Buckets.
-func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
- counts, buckets := his.Counts, his.Buckets
-
- h.mu.Lock()
- defer h.mu.Unlock()
-
- // Clear buckets.
- for i := range h.counts {
- h.counts[i] = 0
- }
- // Copy and reduce buckets.
- var j int
- for i, count := range counts {
- h.counts[j] += count
- if buckets[i+1] == h.buckets[j+1] {
- j++
- }
- }
- if h.hasSum {
- h.sum = sum
- }
-}
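-
-// A hedged worked example of the reduction above: if his.Buckets is
-// [1, 2, 4, 8] with Counts [3, 5, 2] and h.buckets is [1, 4, 8] (the 2
-// boundary was dropped), counts 3 and 5 merge into bucket [1, 4) and count 2
-// lands in [4, 8), yielding h.counts = [8, 2].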
-
-func (h *batchHistogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *batchHistogram) Write(out *dto.Metric) error {
- h.mu.Lock()
- defer h.mu.Unlock()
-
- sum := float64(0)
- if h.hasSum {
- sum = h.sum
- }
- dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
- totalCount := uint64(0)
- for i, count := range h.counts {
- totalCount += count
- if !h.hasSum {
- if count != 0 {
- // N.B. This computed sum is an underestimate.
- sum += h.buckets[i] * float64(count)
- }
- }
-
- // Skip the +Inf bucket, but only for the bucket list.
- // It must still count for sum and totalCount.
- if math.IsInf(h.buckets[i+1], 1) {
- break
- }
- // Float64Histogram's upper bound is exclusive, so make it inclusive
- // by obtaining the next float64 value down, in order.
- upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
- dtoBuckets = append(dtoBuckets, &dto.Bucket{
- CumulativeCount: proto.Uint64(totalCount),
- UpperBound: proto.Float64(upperBound),
- })
- }
- out.Histogram = &dto.Histogram{
- Bucket: dtoBuckets,
- SampleCount: proto.Uint64(totalCount),
- SampleSum: proto.Float64(sum),
- }
- return nil
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
deleted file mode 100644
index b5c8bcb395..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ /dev/null
@@ -1,1577 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "runtime"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// nativeHistogramBounds for the frac (the fractional part of the mantissa) of
-// observed values. Only relevant for schema > 0. The position in the slice is
-// the schema. (0 is never used, just here for convenience of using the schema
-// directly as the index.)
-//
-// TODO(beorn7): Currently, we do a binary search into these slices. There are
-// ways to turn it into a small number of simple array lookups. It probably only
-// matters for schema 5 and beyond, but should be investigated. See this comment
-// as a starting point:
-// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
-var nativeHistogramBounds = [][]float64{
- // Schema "0":
- {0.5},
- // Schema 1:
- {0.5, 0.7071067811865475},
- // Schema 2:
- {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
- // Schema 3:
- {
- 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
- 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
- },
- // Schema 4:
- {
- 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
- 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
- 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
- 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
- },
- // Schema 5:
- {
- 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
- 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
- 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
- 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
- 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
- 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
- 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
- 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
- },
- // Schema 6:
- {
- 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
- 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
- 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
- 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
- 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
- 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
- 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
- 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
- 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
- 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
- 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
- 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
- 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
- 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
- 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
- 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
- },
- // Schema 7:
- {
- 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
- 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
- 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
- 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
- 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
- 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
- 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
- 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
- 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
- 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
- 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
- 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
- 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
- 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
- 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
- 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
- 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
- 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
- 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
- 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
- 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
- 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
- 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
- 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
- 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
- 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
- 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
- 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
- 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
- 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
- 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
- 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
- },
- // Schema 8:
- {
- 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
- 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
- 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
- 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
- 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
- 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
- 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
- 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
- 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
- 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
- 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
- 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
- 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
- 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
- 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
- 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
- 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
- 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
- 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
- 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
- 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
- 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
- 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
- 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
- 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
- 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
- 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
- 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
- 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
- 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
- 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
- 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
- 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
- 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
- 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
- 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
- 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
- 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
- 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
- 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
- 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
- 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
- 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
- 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
- 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
- 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
- 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
- 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
- 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
- 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
- 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
- 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
- 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
- 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
- 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
- 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
- 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
- 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
- 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
- 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
- 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
- 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
- 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
- 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
- },
-}
-
-// The nativeHistogramBounds above can be generated with the code below.
-//
-// TODO(beorn7): It's tempting to actually use `go generate` to generate the
-// code above. However, this could lead to slightly different numbers on
-// different architectures. We still need to decide whether we are fine with
-// that, or whether we would prefer to specify precise numbers in the standard.
-//
-// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
-//
-// func init() {
-// // Populate nativeHistogramBounds.
-// numBuckets := 1
-// for i := range nativeHistogramBounds {
-// bounds := []float64{0.5}
-// factor := math.Exp2(math.Exp2(float64(-i)))
-// for j := 0; j < numBuckets-1; j++ {
-// var bound float64
-// if (j+1)%2 == 0 {
-// // Use previously calculated value for increased precision.
-// bound = nativeHistogramBounds[i-1][j/2+1]
-// } else {
-// bound = bounds[j] * factor
-// }
-// bounds = append(bounds, bound)
-// }
-// numBuckets *= 2
-// nativeHistogramBounds[i] = bounds
-// }
-// }
-
-// A Histogram counts individual observations from an event or sample stream in
-// configurable static buckets (or in dynamic sparse buckets as part of the
-// experimental Native Histograms, see below for more details). Similar to a
-// Summary, it also provides a sum of observations and an observation count.
-//
-// On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile PromQL function.
-//
-// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
-// (see the documentation for detailed procedures). However, Histograms require
-// the user to pre-define suitable buckets, and they are in general less
-// accurate. (Both problems are addressed by the experimental Native
-// Histograms. To use them, configure a NativeHistogramBucketFactor in the
-// HistogramOpts. They also require a Prometheus server v2.40+ with the
-// corresponding feature flag enabled.)
-//
-// The Observe method of a Histogram has a very low performance overhead in
-// comparison with the Observe method of a Summary.
-//
-// To create Histogram instances, use NewHistogram.
-type Histogram interface {
- Metric
- Collector
-
- // Observe adds a single observation to the histogram. Observations are
- // usually positive or zero. Negative observations are accepted but
- // prevent current versions of Prometheus from properly detecting
- // counter resets in the sum of observations. (The experimental Native
- // Histograms handle negative observations properly.) See
- // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
- // for details.
- Observe(float64)
-}
-
-// bucketLabel is used for the label that defines the upper bound of a
-// bucket of a histogram ("le" -> "less or equal").
-const bucketLabel = "le"
-
-// DefBuckets are the default Histogram buckets. The default buckets are
-// tailored to broadly measure the response time (in seconds) of a network
-// service. Most likely, however, you will be required to define buckets
-// customized to your use case.
-var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
-
-// DefNativeHistogramZeroThreshold is the default value for
-// NativeHistogramZeroThreshold in the HistogramOpts.
-//
-// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
-// which is a bucket boundary at all possible resolutions.
-const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
-
-// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
-// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
-// bucket that only receives observations of precisely zero.
-const NativeHistogramZeroThresholdZero = -1
-
-var errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", bucketLabel,
-)
-
-// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
-// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
-// counted and not included in the returned slice. The returned slice is meant
-// to be used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is zero or negative.
-func LinearBuckets(start, width float64, count int) []float64 {
- if count < 1 {
- panic("LinearBuckets needs a positive count")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start += width
- }
- return buckets
-}
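-
-// For example (a hedged illustration of the function above):
-//
-//	LinearBuckets(10, 5, 4) // => []float64{10, 15, 20, 25}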
-
-// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
-// has an upper bound of 'start' and each following bucket's upper bound is
-// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
-// not counted and not included in the returned slice. The returned slice is
-// meant to be used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
-// or if 'factor' is less than or equal 1.
-func ExponentialBuckets(start, factor float64, count int) []float64 {
- if count < 1 {
- panic("ExponentialBuckets needs a positive count")
- }
- if start <= 0 {
- panic("ExponentialBuckets needs a positive start value")
- }
- if factor <= 1 {
- panic("ExponentialBuckets needs a factor greater than 1")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start *= factor
- }
- return buckets
-}
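-
-// For example (a hedged illustration of the function above):
-//
-//	ExponentialBuckets(1, 2, 5) // => []float64{1, 2, 4, 8, 16}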
-
-// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
-// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
-func ExponentialBucketsRange(min, max float64, count int) []float64 {
- if count < 1 {
-		panic("ExponentialBucketsRange needs a positive count")
- }
- if min <= 0 {
- panic("ExponentialBucketsRange min needs to be greater than 0")
- }
-
- // Formula for exponential buckets.
- // max = min*growthFactor^(bucketCount-1)
-
- // We know max/min and highest bucket. Solve for growthFactor.
- growthFactor := math.Pow(max/min, 1.0/float64(count-1))
-
- // Now that we know growthFactor, solve for each bucket.
- buckets := make([]float64, count)
- for i := 1; i <= count; i++ {
- buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
- }
- return buckets
-}
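-
-// For example (a hedged illustration of the function above; the growth factor
-// works out to (100/1)^(1/4) ≈ 3.1623):
-//
-//	ExponentialBucketsRange(1, 100, 5) // ≈ []float64{1, 3.16, 10, 31.6, 100}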
-
-// HistogramOpts bundles the options for creating a Histogram metric. It is
-// mandatory to set Name to a non-empty string. All other fields are optional
-// and can safely be left at their zero value, although it is strongly
-// encouraged to set a Help string.
-type HistogramOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Histogram (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the Histogram must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Histogram.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
- ConstLabels Labels
-
- // Buckets defines the buckets into which observations are counted. Each
- // element in the slice is the upper inclusive bound of a bucket. The
- // values must be sorted in strictly increasing order. There is no need
- // to add a highest bucket with +Inf bound, it will be added
- // implicitly. If Buckets is left as nil or set to a slice of length
- // zero, it is replaced by default buckets. The default buckets are
- // DefBuckets if no buckets for a native histogram (see below) are used,
- // otherwise the default is no buckets. (In other words, if you want to
- // use both regular buckets and buckets for a native histogram, you have
- // to define the regular buckets here explicitly.)
- Buckets []float64
-
- // If NativeHistogramBucketFactor is greater than one, so-called sparse
- // buckets are used (in addition to the regular buckets, if defined
- // above). A Histogram with sparse buckets will be ingested as a Native
- // Histogram by a Prometheus server with that feature enabled (requires
- // Prometheus v2.40+). Sparse buckets are exponential buckets covering
- // the whole float64 range (with the exception of the “zero” bucket, see
- // NativeHistogramZeroThreshold below). From any one bucket to the next,
- // the width of the bucket grows by a constant
- // factor. NativeHistogramBucketFactor provides an upper bound for this
- // factor (for an exception, see below). The smaller
- // NativeHistogramBucketFactor, the more buckets will be used and thus
- // the more costly the histogram will become. A generally good trade-off
- // between cost and accuracy is a value of 1.1 (each bucket is at most
- // 10% wider than the previous one), which will result in each power of
- // two divided into 8 buckets (e.g. there will be 8 buckets between 1
- // and 2, same as between 2 and 4, and 4 and 8, etc.).
- //
- // Details about the actually used factor: The factor is calculated as
- // 2^(2^-n), where n is an integer number between (and including) -4 and
- // 8. n is chosen so that the resulting factor is the largest that is
- // still smaller or equal to NativeHistogramBucketFactor. Note that the
- // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
- // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
- // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
- // it is larger than the provided NativeHistogramBucketFactor.
- //
- // NOTE: Native Histograms are still an experimental feature. Their
- // behavior might still change without a major version
- // bump. Consequently, all NativeHistogram... options here might still
- // change their behavior or name (or might completely disappear) without
- // a major version bump.
- NativeHistogramBucketFactor float64
- // All observations with an absolute value less than or equal to
- // NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
- // For best results, this should be close to a bucket boundary. This is
- // usually the case if picking a power of two. If
- // NativeHistogramZeroThreshold is left at zero,
- // DefNativeHistogramZeroThreshold is used as the threshold. To
- // configure a zero bucket with an actual threshold of zero (i.e. only
- // observations of precisely zero will go into the zero bucket), set
- // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
- // constant (or any negative float value).
- NativeHistogramZeroThreshold float64
-
- // The remaining fields define a strategy to limit the number of
- // populated sparse buckets. If NativeHistogramMaxBucketNumber is left
- // at zero, the number of buckets is not limited. (Note that this might
- // lead to unbounded memory consumption if the values observed by the
- // Histogram are sufficiently wide-spread. In particular, this could be
- // used as a DoS attack vector. Where the observed values depend on
- // external inputs, it is highly recommended to set a
- // NativeHistogramMaxBucketNumber.) Once the set
- // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
- // enacted:
- // - First, if the last reset (or the creation) of the histogram is at
- // least NativeHistogramMinResetDuration ago, then the whole
- // histogram is reset to its initial state (including regular
- // buckets).
- // - If less time has passed, or if NativeHistogramMinResetDuration is
- // zero, no reset is performed. Instead, the zero threshold is
- // increased sufficiently to reduce the number of buckets to or below
- // NativeHistogramMaxBucketNumber, but not to more than
- // NativeHistogramMaxZeroThreshold. Thus, if
- // NativeHistogramMaxZeroThreshold is already at or below the current
- // zero threshold, nothing happens at this step.
- // - After that, if the number of buckets still exceeds
- // NativeHistogramMaxBucketNumber, the resolution of the histogram is
- // reduced by doubling the width of the sparse buckets (up to a
- //    growth factor from one bucket to the next of 2^(2^4) = 65536,
- // see above).
- // - Any increased zero threshold or reduced resolution is reset back
- // to their original values once NativeHistogramMinResetDuration has
- // passed (since the last reset or the creation of the histogram).
- NativeHistogramMaxBucketNumber uint32
- NativeHistogramMinResetDuration time.Duration
- NativeHistogramMaxZeroThreshold float64
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-
- // afterFunc is for testing purposes, by default it's time.AfterFunc.
- afterFunc func(time.Duration, func()) *time.Timer
-}
-
-// HistogramVecOpts bundles the options to create a HistogramVec metric.
-// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
-// is optional and can safely be left at its default value.
-type HistogramVecOpts struct {
- HistogramOpts
-
- // VariableLabels are used to partition the metric vector by the given set
- // of labels. Each label value will be constrained with the optional Constraint
- // function, if provided.
- VariableLabels ConstrainableLabels
-}
-
-// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
-// panics if the buckets in HistogramOpts are not in strictly increasing order.
-//
-// The returned implementation also implements ExemplarObserver. It is safe to
-// perform the corresponding type assertion. Exemplars are tracked separately
-// for each bucket.
-func NewHistogram(opts HistogramOpts) Histogram {
- return newHistogram(
- NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ),
- opts,
- )
-}
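-
-// A minimal sketch (illustrative; the metric name and settings are made up)
-// of a histogram combining explicit classic buckets with native histogram
-// (sparse) buckets enabled via NativeHistogramBucketFactor:
-func exampleNativeHistogram() Histogram {
- return NewHistogram(HistogramOpts{
- Name: "request_duration_seconds",
- Help: "Request duration in seconds.",
- Buckets: ExponentialBuckets(0.001, 2, 10),
- NativeHistogramBucketFactor: 1.1, // At most 10% growth per bucket.
- NativeHistogramMaxBucketNumber: 100, // Guard against unbounded memory use.
- NativeHistogramMinResetDuration: time.Hour,
- })
-}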
-
-func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
- if len(desc.variableLabels.names) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
- }
-
- for _, n := range desc.variableLabels.names {
- if n == bucketLabel {
- panic(errBucketLabelNotAllowed)
- }
- }
- for _, lp := range desc.constLabelPairs {
- if lp.GetName() == bucketLabel {
- panic(errBucketLabelNotAllowed)
- }
- }
-
- if opts.now == nil {
- opts.now = time.Now
- }
- if opts.afterFunc == nil {
- opts.afterFunc = time.AfterFunc
- }
- h := &histogram{
- desc: desc,
- upperBounds: opts.Buckets,
- labelPairs: MakeLabelPairs(desc, labelValues),
- nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
- nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
- nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
- lastResetTime: opts.now(),
- now: opts.now,
- afterFunc: opts.afterFunc,
- }
- if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
- h.upperBounds = DefBuckets
- }
- if opts.NativeHistogramBucketFactor <= 1 {
- h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
- } else {
- switch {
- case opts.NativeHistogramZeroThreshold > 0:
- h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
- case opts.NativeHistogramZeroThreshold == 0:
- h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
- } // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
- h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
- }
- for i, upperBound := range h.upperBounds {
- if i < len(h.upperBounds)-1 {
- if upperBound >= h.upperBounds[i+1] {
- panic(fmt.Errorf(
- "histogram buckets must be in increasing order: %f >= %f",
- upperBound, h.upperBounds[i+1],
- ))
- }
- } else {
- if math.IsInf(upperBound, +1) {
- // The +Inf bucket is implicit. Remove it here.
- h.upperBounds = h.upperBounds[:i]
- }
- }
- }
- // Now we know the final length of h.upperBounds and can make buckets
- // for both counts as well as exemplars:
- h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
- atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
- atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
- h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
- atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
- atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
- h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
-
- h.init(h) // Init self-collection.
- return h
-}
-
-type histogramCounts struct {
- // Order in this struct matters for the alignment required by atomic
- // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
-
- // sumBits contains the bits of the float64 representing the sum of all
- // observations.
- sumBits uint64
- count uint64
-
- // nativeHistogramZeroBucket counts all (positive and negative)
- // observations in the zero bucket (with an absolute value less than or
- // equal to the current threshold, see the next field).
- nativeHistogramZeroBucket uint64
- // nativeHistogramZeroThresholdBits is the bit pattern of the current
- // threshold for the zero bucket. It's initially equal to
- // nativeHistogramZeroThreshold but may change according to the bucket
- // count limitation strategy.
- nativeHistogramZeroThresholdBits uint64
- // nativeHistogramSchema may change over time according to the bucket
- // count limitation strategy and therefore has to be saved here.
- nativeHistogramSchema int32
- // Number of (positive and negative) sparse buckets.
- nativeHistogramBucketsNumber uint32
-
- // Regular buckets.
- buckets []uint64
-
- // The sparse buckets for native histograms are implemented with a
- // sync.Map for now. A dedicated data structure will likely be more
- // efficient. There are separate maps for negative and positive
- // observations. The map's value is an *int64, counting observations in
- // that bucket. (Note that we don't use uint64 as an int64 won't
- // overflow in practice, and working with signed numbers from the
- // beginning simplifies the handling of deltas.) The map's key is the
- // index of the bucket according to the used
- // nativeHistogramSchema. Index 0 is for an upper bound of 1.
- nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
-}
-
-// observe manages the parts of observe that only affect
-// histogramCounts. doSparse is true if sparse buckets should be updated,
-// too.
-func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
- if bucket < len(hc.buckets) {
- atomic.AddUint64(&hc.buckets[bucket], 1)
- }
- atomicAddFloat(&hc.sumBits, v)
- if doSparse && !math.IsNaN(v) {
- var (
- key int
- schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
- zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
- bucketCreated, isInf bool
- )
- if math.IsInf(v, 0) {
- // Pretend v is MaxFloat64 but later increment key by one.
- if math.IsInf(v, +1) {
- v = math.MaxFloat64
- } else {
- v = -math.MaxFloat64
- }
- isInf = true
- }
- frac, exp := math.Frexp(math.Abs(v))
- if schema > 0 {
- bounds := nativeHistogramBounds[schema]
- key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
- } else {
- key = exp
- if frac == 0.5 {
- key--
- }
- offset := (1 << -schema) - 1
- key = (key + offset) >> -schema
- }
- if isInf {
- key++
- }
- switch {
- case v > zeroThreshold:
- bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
- case v < -zeroThreshold:
- bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
- default:
- atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
- }
- if bucketCreated {
- atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
- }
- }
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hc.count, 1)
-}
-
-type histogram struct {
- // countAndHotIdx enables lock-free writes with use of atomic updates.
- // The most significant bit is the hot index [0 or 1] of the count field
- // below. Observe calls update the hot one. All remaining bits count the
- // number of Observe calls. Observe starts by incrementing this counter,
- // and finish by incrementing the count field in the respective
- // histogramCounts, as a marker for completion.
- //
- // Calls of the Write method (which are non-mutating reads from the
- // perspective of the histogram) swap the hot and cold counts under the
- // mtx lock. A cooldown is awaited (while locked) by comparing the
- // number of observations with the initiation count. Once they match,
- // the last observation on the now cool one has completed. All cold
- // fields must be merged into the new hot before releasing mtx.
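- //
- // For example, with hot index 1 and 5 completed observations,
- // countAndHotIdx is (1<<63)|5. Adding 1<<63 overflows the top bit and
- // yields (0<<63)|5: the hot index flips to 0 while the count of 5 in
- // the lower 63 bits is preserved.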
- //
- // Fields with atomic access first! See alignment constraint:
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- countAndHotIdx uint64
-
- selfCollector
- desc *Desc
-
- // Only used in the Write method and for sparse bucket management.
- mtx sync.Mutex
-
- // Two counts, one is "hot" for lock-free observations, the other is
- // "cold" for writing out a dto.Metric. It has to be an array of
- // pointers to guarantee 64bit alignment of the histogramCounts, see
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- counts [2]*histogramCounts
-
- upperBounds []float64
- labelPairs []*dto.LabelPair
- exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
- nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
- nativeHistogramZeroThreshold float64 // The initial zero threshold.
- nativeHistogramMaxZeroThreshold float64
- nativeHistogramMaxBuckets uint32
- nativeHistogramMinResetDuration time.Duration
- // lastResetTime is protected by mtx. It is also used as the created timestamp.
- lastResetTime time.Time
- // resetScheduled is protected by mtx. It is true if a reset is
- // scheduled for a later time (when nativeHistogramMinResetDuration has
- // passed).
- resetScheduled bool
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-
- // afterFunc is for testing purposes, by default it's time.AfterFunc.
- afterFunc func(time.Duration, func()) *time.Timer
-}
-
-func (h *histogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *histogram) Observe(v float64) {
- h.observe(v, h.findBucket(v))
-}
-
-func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
- i := h.findBucket(v)
- h.observe(v, i)
- h.updateExemplar(v, i, e)
-}
-
-func (h *histogram) Write(out *dto.Metric) error {
- // For simplicity, we protect this whole method by a mutex. It is not in
- // the hot path, i.e. Observe is called much more often than Write. The
- // complication of making Write lock-free isn't worth it, if possible at
- // all.
- h.mtx.Lock()
- defer h.mtx.Unlock()
-
- // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
- // without touching the count bits. See the struct comments for a full
- // description of the algorithm.
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
- // count is contained unchanged in the lower 63 bits.
- count := n & ((1 << 63) - 1)
- // The most significant bit tells us which counts is hot. The complement
- // is thus the cold one.
- hotCounts := h.counts[n>>63]
- coldCounts := h.counts[(^n)>>63]
-
- waitForCooldown(count, coldCounts)
-
- his := &dto.Histogram{
- Bucket: make([]*dto.Bucket, len(h.upperBounds)),
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
- CreatedTimestamp: timestamppb.New(h.lastResetTime),
- }
- out.Histogram = his
- out.Label = h.labelPairs
-
- var cumCount uint64
- for i, upperBound := range h.upperBounds {
- cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
- his.Bucket[i] = &dto.Bucket{
- CumulativeCount: proto.Uint64(cumCount),
- UpperBound: proto.Float64(upperBound),
- }
- if e := h.exemplars[i].Load(); e != nil {
- his.Bucket[i].Exemplar = e.(*dto.Exemplar)
- }
- }
- // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
- if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
- b := &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
- UpperBound: proto.Float64(math.Inf(1)),
- Exemplar: e.(*dto.Exemplar),
- }
- his.Bucket = append(his.Bucket, b)
- }
- if h.nativeHistogramSchema > math.MinInt32 {
- his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
- his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
- zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
-
- defer func() {
- coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
- coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
- }()
-
- his.ZeroCount = proto.Uint64(zeroBucket)
- his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
- his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
-
- // Add a no-op span to a histogram without observations and with
- // a zero threshold of zero. Otherwise, a native histogram would
- // look like a classic histogram to scrapers.
- if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
- his.PositiveSpan = []*dto.BucketSpan{{
- Offset: proto.Int32(0),
- Length: proto.Uint32(0),
- }}
- }
- }
- addAndResetCounts(hotCounts, coldCounts)
- return nil
-}
-
-// findBucket returns the index of the bucket for the provided value, or
-// len(h.upperBounds) for the +Inf bucket.
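-// For example, with upperBounds {1, 2, 5}, findBucket(2) returns 1 (bucket
-// bounds are upper-inclusive) and findBucket(7) returns 3, i.e. the
-// implicit +Inf bucket.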
-func (h *histogram) findBucket(v float64) int {
- // TODO(beorn7): For small numbers of buckets (<30), a linear search is
- // slightly faster than the binary search. If we really care, we could
- // switch from one search strategy to the other depending on the number
- // of buckets.
- //
- // Microbenchmarks (BenchmarkHistogramNoLabels):
- // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
- // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
- // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
- return sort.SearchFloat64s(h.upperBounds, v)
-}
-
-// observe is the implementation for Observe without the findBucket part.
-func (h *histogram) observe(v float64, bucket int) {
- // Do not add to sparse buckets for NaN observations.
- doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
- // We increment h.countAndHotIdx so that the counter in the lower
- // 63 bits gets incremented. At the same time, we get the new value
- // back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1)
- hotCounts := h.counts[n>>63]
- hotCounts.observe(v, bucket, doSparse)
- if doSparse {
- h.limitBuckets(hotCounts, v, bucket)
- }
-}
-
-// limitBuckets applies a strategy to limit the number of populated sparse
-// buckets. It's generally best effort, and there are situations where the
-// number can go higher (if even the lowest resolution isn't enough to reduce
-// the number sufficiently, or if the provided counts aren't fully updated yet
-// by a concurrently happening Write call).
-func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
- if h.nativeHistogramMaxBuckets == 0 {
- return // No limit configured.
- }
- if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
- return // Bucket limit not exceeded yet.
- }
-
- h.mtx.Lock()
- defer h.mtx.Unlock()
-
- // The hot counts might have been swapped just before we acquired the
- // lock. Re-fetch the hot counts first...
- n := atomic.LoadUint64(&h.countAndHotIdx)
- hotIdx := n >> 63
- coldIdx := (^n) >> 63
- hotCounts := h.counts[hotIdx]
- coldCounts := h.counts[coldIdx]
- // ...and then check again if we really have to reduce the bucket count.
- if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
- return // Bucket limit not exceeded after all.
- }
- // Try the various strategies in order.
- if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
- return
- }
- // One of the other strategies below will happen. To undo its effects as
- // soon as enough time has passed to satisfy
- // h.nativeHistogramMinResetDuration, schedule a reset at the right time
- // if we haven't done so already.
- if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
- h.resetScheduled = true
- h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
- }
-
- if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
- return
- }
- h.doubleBucketWidth(hotCounts, coldCounts)
-}
-
-// maybeReset resets the whole histogram if at least
-// h.nativeHistogramMinResetDuration has been passed. It returns true if the
-// histogram has been reset. The caller must have locked h.mtx.
-func (h *histogram) maybeReset(
- hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
-) bool {
- // We are using the possibly mocked h.now() rather than
- // time.Since(h.lastResetTime) to enable testing.
- if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
- h.resetScheduled || // Do not interfere if a reset is already scheduled.
- h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
- return false
- }
- // Completely reset coldCounts.
- h.resetCounts(cold)
- // Repeat the latest observation to not lose it completely.
- cold.observe(value, bucket, true)
- // Make coldCounts the new hot counts while resetting countAndHotIdx.
- n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
- count := n & ((1 << 63) - 1)
- waitForCooldown(count, hot)
- // Finally, reset the formerly hot counts, too.
- h.resetCounts(hot)
- h.lastResetTime = h.now()
- return true
-}
-
-// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
-// called without having locked h.mtx.
-func (h *histogram) reset() {
- h.mtx.Lock()
- defer h.mtx.Unlock()
-
- n := atomic.LoadUint64(&h.countAndHotIdx)
- hotIdx := n >> 63
- coldIdx := (^n) >> 63
- hot := h.counts[hotIdx]
- cold := h.counts[coldIdx]
- // Completely reset coldCounts.
- h.resetCounts(cold)
- // Make coldCounts the new hot counts while resetting countAndHotIdx.
- n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
- count := n & ((1 << 63) - 1)
- waitForCooldown(count, hot)
- // Finally, reset the formerly hot counts, too.
- h.resetCounts(hot)
- h.lastResetTime = h.now()
- h.resetScheduled = false
-}
-
-// maybeWidenZeroBucket widens the zero bucket until it includes the existing
-// buckets closest to the zero bucket (which could be two, if an equidistant
-// negative and a positive bucket exist, but usually it's only one bucket to be
-// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
-// limits how far the zero bucket can be extended, and if that's not enough to
-// include an existing bucket, the method returns false. The caller must have
-// locked h.mtx.
-func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
- currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
- if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
- return false
- }
- // Find the key of the bucket closest to zero.
- smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
- smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
- if smallestNegativeKey < smallestKey {
- smallestKey = smallestNegativeKey
- }
- if smallestKey == math.MaxInt32 {
- return false
- }
- newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
- if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
- return false // New threshold would exceed the max threshold.
- }
- atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
- // Remove applicable buckets.
- if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
- atomicDecUint32(&cold.nativeHistogramBucketsNumber)
- }
- if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
- atomicDecUint32(&cold.nativeHistogramBucketsNumber)
- }
- // Make cold counts the new hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
- count := n & ((1 << 63) - 1)
- // Swap the pointer names to represent the new roles and make
- // the rest less confusing.
- hot, cold = cold, hot
- waitForCooldown(count, cold)
- // Add all the now cold counts to the new hot counts...
- addAndResetCounts(hot, cold)
- // ...adjust the new zero threshold in the cold counts, too...
- atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
- // ...and then merge the newly deleted buckets into the wider zero
- // bucket.
- mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
- return func(k, v interface{}) bool {
- key := k.(int)
- bucket := v.(*int64)
- if key == smallestKey {
- // Merge into hot zero bucket...
- atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
- // ...and delete from cold counts.
- coldBuckets.Delete(key)
- atomicDecUint32(&cold.nativeHistogramBucketsNumber)
- } else {
- // Add to corresponding hot bucket...
- if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
- atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
- }
- // ...and reset cold bucket.
- atomic.StoreInt64(bucket, 0)
- }
- return true
- }
- }
-
- cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
- cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
- return true
-}
-
-// doubleBucketWidth doubles the bucket width (by decrementing the schema
-// number). Note that very sparse buckets could lead to a low reduction of the
-// bucket count (or even no reduction at all). The method does nothing if the
-// schema is already -4.
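-// For example, when the schema drops from 2 to 1, the old keys 3 and 4
-// (upper bounds 2^(3/4) and 2^1) both merge into the new key 2 (upper
-// bound 2^1); positive keys are incremented before halving so that the
-// merged buckets stay aligned with the coarser boundaries.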
-func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
- coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
- if coldSchema == -4 {
- return // Already at lowest resolution.
- }
- coldSchema--
- atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
- // Play it simple and just delete all cold buckets.
- atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
- deleteSyncMap(&cold.nativeHistogramBucketsNegative)
- deleteSyncMap(&cold.nativeHistogramBucketsPositive)
- // Make coldCounts the new hot counts.
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
- count := n & ((1 << 63) - 1)
- // Swap the pointer names to represent the new roles and make
- // the rest less confusing.
- hot, cold = cold, hot
- waitForCooldown(count, cold)
- // Add all the now cold counts to the new hot counts...
- addAndResetCounts(hot, cold)
- // ...adjust the schema in the cold counts, too...
- atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
- // ...and then merge the cold buckets into the wider hot buckets.
- merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
- return func(k, v interface{}) bool {
- key := k.(int)
- bucket := v.(*int64)
- // Adjust key to match the bucket to merge into.
- if key > 0 {
- key++
- }
- key /= 2
- // Add to corresponding hot bucket.
- if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
- atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
- }
- return true
- }
- }
-
- cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
- cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
- // Play it simple again and just delete all cold buckets.
- atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
- deleteSyncMap(&cold.nativeHistogramBucketsNegative)
- deleteSyncMap(&cold.nativeHistogramBucketsPositive)
-}
-
-func (h *histogram) resetCounts(counts *histogramCounts) {
- atomic.StoreUint64(&counts.sumBits, 0)
- atomic.StoreUint64(&counts.count, 0)
- atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
- atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
- atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
- atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
- for i := range h.upperBounds {
- atomic.StoreUint64(&counts.buckets[i], 0)
- }
- deleteSyncMap(&counts.nativeHistogramBucketsNegative)
- deleteSyncMap(&counts.nativeHistogramBucketsPositive)
-}
-
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
-func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
- if l == nil {
- return
- }
- e, err := newExemplar(v, h.now(), l)
- if err != nil {
- panic(err)
- }
- h.exemplars[bucket].Store(e)
-}
-
-// HistogramVec is a Collector that bundles a set of Histograms that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewHistogramVec.
-type HistogramVec struct {
- *MetricVec
-}
-
-// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names.
-func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- return V2.NewHistogramVec(HistogramVecOpts{
- HistogramOpts: opts,
- VariableLabels: UnconstrainedLabels(labelNames),
- })
-}
-
-// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
-func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
- desc := V2.NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- opts.VariableLabels,
- opts.ConstLabels,
- )
- return &HistogramVec{
- MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newHistogram(desc, opts.HistogramOpts, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Histogram for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Histogram is created.
-//
-// It is possible to call this method without using the returned Histogram to only
-// create the new Histogram but leave it at its starting value, a Histogram without
-// any observations.
-//
-// Keeping the Histogram for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
-// Histogram will still exist, but it will not be exported anymore, even if a
-// Histogram with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
- metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Histogram for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Histogram is created. Implications of
-// creating a Histogram without using it and keeping the Histogram for later use
-// are the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
- metric, err := v.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
- h, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return h
-}
-
-// With works as GetMetricWith but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *HistogramVec) With(labels Labels) Observer {
- h, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return h
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the HistogramVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
- vec, err := v.MetricVec.CurryWith(labels)
- if vec != nil {
- return &HistogramVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
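-
-// A minimal sketch (illustrative; the metric and label names are made up)
-// of currying a HistogramVec:
-func exampleCurryHistogramVec() {
- v := NewHistogramVec(HistogramOpts{
- Name: "http_request_duration_seconds",
- Help: "Request duration, partitioned by status code and method.",
- Buckets: DefBuckets,
- }, []string{"code", "method"})
- get := v.MustCurryWith(Labels{"method": "GET"})
- // Equivalent to v.WithLabelValues("200", "GET").Observe(0.12):
- get.WithLabelValues("200").Observe(0.12)
-}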
-
-type constHistogram struct {
- desc *Desc
- count uint64
- sum float64
- buckets map[float64]uint64
- labelPairs []*dto.LabelPair
- createdTs *timestamppb.Timestamp
-}
-
-func (h *constHistogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *constHistogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{
- CreatedTimestamp: h.createdTs,
- }
-
- buckets := make([]*dto.Bucket, 0, len(h.buckets))
-
- his.SampleCount = proto.Uint64(h.count)
- his.SampleSum = proto.Float64(h.sum)
- for upperBound, count := range h.buckets {
- buckets = append(buckets, &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
- UpperBound: proto.Float64(upperBound),
- })
- }
-
- if len(buckets) > 0 {
- sort.Sort(buckSort(buckets))
- }
- his.Bucket = buckets
-
- out.Histogram = his
- out.Label = h.labelPairs
-
- return nil
-}
-
-// NewConstHistogram returns a metric representing a Prometheus histogram with
-// fixed values for the count, sum, and bucket counts. As those parameters
-// cannot be changed, the returned value does not implement the Histogram
-// interface (but only the Metric interface). Users of this package will not
-// have much use for it in regular operations. However, when implementing custom
-// Collectors, it is useful as a throw-away metric that is generated on the fly
-// and sent to Prometheus in the Collect method.
-//
-// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
-//
-// NewConstHistogram returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc or if Desc is invalid.
-func NewConstHistogram(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
- return &constHistogram{
- desc: desc,
- count: count,
- sum: sum,
- buckets: buckets,
- labelPairs: MakeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstHistogram is a version of NewConstHistogram that panics where
-// NewConstHistogram would have returned an error.
-func MustNewConstHistogram(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- labelValues ...string,
-) Metric {
- m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
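-
-// A minimal sketch (illustrative; desc and the numbers would come from an
-// external system) of emitting a constant histogram from a custom
-// collector's Collect method:
-func exampleConstHistogram(ch chan<- Metric, desc *Desc) {
- ch <- MustNewConstHistogram(
- desc,
- 4711,   // Count, which is also the implicit +Inf bucket's value.
- 403.34, // Sum.
- map[float64]uint64{25: 121, 50: 2403, 100: 4233}, // Upper bound -> cumulative count.
- )
-}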
-
-type buckSort []*dto.Bucket
-
-func (s buckSort) Len() int {
- return len(s)
-}
-
-func (s buckSort) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s buckSort) Less(i, j int) bool {
- return s[i].GetUpperBound() < s[j].GetUpperBound()
-}
-
-// pickSchema returns the largest number n between -4 and 8 such that
-// 2^(2^-n) is less than or equal to the provided bucketFactor.
-//
-// Special cases:
-// - bucketFactor <= 1: panics.
-// - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
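-//
-// For example, pickSchema(1.1) returns 3: floor(log2(log2(1.1))) is -3,
-// and the resulting factor 2^(2^-3) ≈ 1.0905 is the largest factor of the
-// form 2^(2^-n) not exceeding 1.1.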
-func pickSchema(bucketFactor float64) int32 {
- if bucketFactor <= 1 {
- panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
- }
- floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
- switch {
- case floor <= -8:
- return 8
- case floor >= 4:
- return -4
- default:
- return -int32(floor)
- }
-}
-
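-// makeBuckets converts the populated sparse buckets in the provided sync.Map
-// into the span/delta representation used by the protobuf exchange format.
-// For example (illustrative numbers), buckets at keys 1 (count 5), 2 (count
-// 3), and 6 (count 2) encode as spans [{offset:1,length:2},
-// {offset:3,length:1}] and deltas [5,-2,-1], each delta being the difference
-// to the previous bucket's count.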
-func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
- var ii []int
- buckets.Range(func(k, v interface{}) bool {
- ii = append(ii, k.(int))
- return true
- })
- sort.Ints(ii)
-
- if len(ii) == 0 {
- return nil, nil
- }
-
- var (
- spans []*dto.BucketSpan
- deltas []int64
- prevCount int64
- nextI int
- )
-
- appendDelta := func(count int64) {
- *spans[len(spans)-1].Length++
- deltas = append(deltas, count-prevCount)
- prevCount = count
- }
-
- for n, i := range ii {
- v, _ := buckets.Load(i)
- count := atomic.LoadInt64(v.(*int64))
- // Multiple spans with only small gaps in between are probably
- // encoded more efficiently as one larger span with a few empty
- // buckets. Needs some research to find the sweet spot. For now,
- // we assume that gaps of one or two buckets should not create
- // a new span.
- iDelta := int32(i - nextI)
- if n == 0 || iDelta > 2 {
- // We have to create a new span, either because we are
- // at the very beginning, or because we have found a gap
- // of more than two buckets.
- spans = append(spans, &dto.BucketSpan{
- Offset: proto.Int32(iDelta),
- Length: proto.Uint32(0),
- })
- } else {
- // We have found a small gap (or no gap at all).
- // Insert empty buckets as needed.
- for j := int32(0); j < iDelta; j++ {
- appendDelta(0)
- }
- }
- appendDelta(count)
- nextI = i + 1
- }
- return spans, deltas
-}
-
-// addToBucket increments the sparse bucket at key by the provided amount. It
-// returns true if a new sparse bucket had to be created for that.
-func addToBucket(buckets *sync.Map, key int, increment int64) bool {
- if existingBucket, ok := buckets.Load(key); ok {
- // Fast path without allocation.
- atomic.AddInt64(existingBucket.(*int64), increment)
- return false
- }
- // Bucket doesn't exist yet. Slow path allocating new counter.
- newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
- if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
- // The bucket was created concurrently in another goroutine.
- // Have to increment after all.
- atomic.AddInt64(actualBucket.(*int64), increment)
- return false
- }
- return true
-}
-
-// addAndReset returns a function to be used with sync.Map.Range of sparse
-// buckets in coldCounts. It increments the buckets in the provided hotBuckets
-// according to the buckets ranged through. It then resets all buckets ranged
-// through to 0 (but leaves them in place so that they don't need to get
-// recreated on the next scrape).
-func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
- return func(k, v interface{}) bool {
- bucket := v.(*int64)
- if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
- atomic.AddUint32(bucketNumber, 1)
- }
- atomic.StoreInt64(bucket, 0)
- return true
- }
-}
-
-func deleteSyncMap(m *sync.Map) {
- m.Range(func(k, v interface{}) bool {
- m.Delete(k)
- return true
- })
-}
-
-func findSmallestKey(m *sync.Map) int {
- result := math.MaxInt32
- m.Range(func(k, v interface{}) bool {
- key := k.(int)
- if key < result {
- result = key
- }
- return true
- })
- return result
-}
-
-func getLe(key int, schema int32) float64 {
- // Here a bit of context about the behavior for the last bucket counting
- // regular numbers (called simply "last bucket" below) and the bucket
- // counting observations of ±Inf (called "inf bucket" below, with a key
- // one higher than that of the "last bucket"):
- //
- // If we apply the usual formula to the last bucket, its upper bound
- // would be calculated as +Inf. The reason is that the max possible
- // regular float64 number (math.MaxFloat64) doesn't coincide with one of
- // the calculated bucket boundaries. So the calculated boundary has to
- // be larger than math.MaxFloat64, and the only float64 larger than
- // math.MaxFloat64 is +Inf. However, we want to count actual
- // observations of ±Inf in the inf bucket. Therefore, we have to treat
- // the upper bound of the last bucket specially and set it to
- // math.MaxFloat64. (The upper bound of the inf bucket, with its key
- // being one higher than that of the last bucket, naturally comes out as
- // +Inf by the usual formula. So that's fine.)
- //
- // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
- // 1024. If there were a float64 number following math.MaxFloat64, it
- // would have a frac of 1.0 and an exp of 1024, or equivalently a frac
- // of 0.5 and an exp of 1025. However, since frac must be smaller than
- // 1, and exp must be smaller than 1025, either representation overflows
- // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
- // largest possible float64. Q.E.D.) However, the formula for
- // calculating the upper bound from the idx and schema of the last
- // bucket results in precisely that. It is either frac=1.0 & exp=1024
- // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
- // by the way, a power of two where the exponent itself is a power of
- // two, 2¹⁰ in fact, which coincides with a bucket boundary in all
- // schemas.) So these are the special cases we have to catch below.
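- //
- // For example, at schema 0, frac is always 0.5 and exp is key+1, so
- // getLe(3, 0) returns Ldexp(0.5, 4) = 8: at schema 0, bucket key k
- // simply has the upper bound 2^k.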
- if schema < 0 {
- exp := key << -schema
- if exp == 1024 {
- // This is the last bucket before the overflow bucket
- // (for ±Inf observations). Return math.MaxFloat64 as
- // explained above.
- return math.MaxFloat64
- }
- return math.Ldexp(1, exp)
- }
-
- fracIdx := key & ((1 << schema) - 1)
- frac := nativeHistogramBounds[schema][fracIdx]
- exp := (key >> schema) + 1
- if frac == 0.5 && exp == 1025 {
- // This is the last bucket before the overflow bucket (for ±Inf
- // observations). Return math.MaxFloat64 as explained above.
- return math.MaxFloat64
- }
- return math.Ldexp(frac, exp)
-}
-
-// waitForCooldown returns after the count field in the provided histogramCounts
-// has reached the provided count value.
-func waitForCooldown(count uint64, counts *histogramCounts) {
- for count != atomic.LoadUint64(&counts.count) {
- runtime.Gosched() // Let observations get work done.
- }
-}
-
-// atomicAddFloat adds the provided float atomically to another float
-// represented by the bit pattern the bits pointer is pointing to.
-func atomicAddFloat(bits *uint64, v float64) {
- for {
- loadedBits := atomic.LoadUint64(bits)
- newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
- if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
- break
- }
- }
-}
-
-// atomicDecUint32 atomically decrements the uint32 p points to. See
-// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
-func atomicDecUint32(p *uint32) {
- atomic.AddUint32(p, ^uint32(0))
-}
-
-// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
-// bucket) from the cold counts to the corresponding fields in the hot
-// counts. Those fields are then reset to 0 in the cold counts.
-func addAndResetCounts(hot, cold *histogramCounts) {
- atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
- atomic.StoreUint64(&cold.count, 0)
- coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
- atomicAddFloat(&hot.sumBits, coldSum)
- atomic.StoreUint64(&cold.sumBits, 0)
- for i := range hot.buckets {
- atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
- atomic.StoreUint64(&cold.buckets[i], 0)
- }
- atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
- atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
deleted file mode 100644
index 1ed5abe74c..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2015 Björn Rabenstein
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-//
-// The code in this package is copied here to avoid a dependency. Hence this
-// file carries the copyright of the original repo.
-// https://github.com/beorn7/floats
-package internal
-
-import (
- "math"
-)
-
-// minNormalFloat64 is the smallest positive normal value of type float64.
-var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
-
-// AlmostEqualFloat64 returns true if a and b are equal within a relative error
-// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
-// details of the applied method.
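-//
-// For example, AlmostEqualFloat64(0.1+0.2, 0.3, 1e-9) is true even though
-// 0.1+0.2 != 0.3 in float64 arithmetic (the relative error is ~9e-17).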
-func AlmostEqualFloat64(a, b, epsilon float64) bool {
- if a == b {
- return true
- }
- absA := math.Abs(a)
- absB := math.Abs(b)
- diff := math.Abs(a - b)
- if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
- return diff < epsilon*minNormalFloat64
- }
- return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
-}
-
-// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
-func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
- if len(a) != len(b) {
- return false
- }
- for i := range a {
- if !AlmostEqualFloat64(a[i], b[i], epsilon) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
deleted file mode 100644
index a595a20362..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// This file provides tools to compare sequences of strings and generate
-// textual diffs.
-//
-// We maintain `GetUnifiedDiffString` here because the original repository
-// (https://github.com/pmezard/go-difflib) is no longer maintained.
-package internal
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-)
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func calculateRatio(matches, length int) float64 {
- if length > 0 {
- return 2.0 * float64(matches) / float64(length)
- }
- return 1.0
-}
-
-type Match struct {
- A int
- B int
- Size int
-}
-
-type OpCode struct {
- Tag byte
- I1 int
- I2 int
- J1 int
- J2 int
-}
-
-// SequenceMatcher compares sequence of strings. The basic
-// algorithm predates, and is a little fancier than, an algorithm
-// published in the late 1980's by Ratcliff and Obershelp under the
-// hyperbolic name "gestalt pattern matching". The basic idea is to find
-// the longest contiguous matching subsequence that contains no "junk"
-// elements (R-O doesn't address junk). The same idea is then applied
-// recursively to the pieces of the sequences to the left and to the right
-// of the matching subsequence. This does not yield minimal edit
-// sequences, but does tend to yield matches that "look right" to people.
-//
-// SequenceMatcher tries to compute a "human-friendly diff" between two
-// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
-// longest *contiguous* & junk-free matching subsequence. That's what
-// catches peoples' eyes. The Windows(tm) windiff has another interesting
-// notion, pairing up elements that appear uniquely in each sequence.
-// That, and the method here, appear to yield more intuitive difference
-// reports than does diff. This method appears to be the least vulnerable
-// to synching up on blocks of "junk lines", though (like blank lines in
-// ordinary text files, or maybe "<P>" lines in HTML files). That may be
-// because this is the only method of the 3 that has a *concept* of
-// "junk".
-//
-// Timing: Basic R-O is cubic time worst case and quadratic time expected
-// case. SequenceMatcher is quadratic time for the worst case and has
-// expected-case behavior dependent in a complicated way on how many
-// elements the sequences have in common; best case time is linear.
-type SequenceMatcher struct {
- a []string
- b []string
- b2j map[string][]int
- IsJunk func(string) bool
- autoJunk bool
- bJunk map[string]struct{}
- matchingBlocks []Match
- fullBCount map[string]int
- bPopular map[string]struct{}
- opCodes []OpCode
-}
-
-func NewMatcher(a, b []string) *SequenceMatcher {
- m := SequenceMatcher{autoJunk: true}
- m.SetSeqs(a, b)
- return &m
-}
-
-func NewMatcherWithJunk(a, b []string, autoJunk bool,
- isJunk func(string) bool,
-) *SequenceMatcher {
- m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
- m.SetSeqs(a, b)
- return &m
-}
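-
-// A minimal usage sketch (illustrative): listing the matching blocks
-// between two line slices.
-func exampleMatcher() {
- m := NewMatcher([]string{"a", "b", "c"}, []string{"a", "c"})
- for _, blk := range m.GetMatchingBlocks() {
- // Prints a[0:1] == b[0:1], a[2:3] == b[1:2], and the final
- // dummy block a[3:3] == b[2:2].
- fmt.Printf("a[%d:%d] == b[%d:%d]\n", blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size)
- }
-}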
-
-// Set two sequences to be compared.
-func (m *SequenceMatcher) SetSeqs(a, b []string) {
- m.SetSeq1(a)
- m.SetSeq2(b)
-}
-
-// Set the first sequence to be compared. The second sequence to be compared is
-// not changed.
-//
-// SequenceMatcher computes and caches detailed information about the second
-// sequence, so if you want to compare one sequence S against many sequences,
-// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
-// sequences.
-//
-// See also SetSeqs() and SetSeq2().
-func (m *SequenceMatcher) SetSeq1(a []string) {
- if &a == &m.a {
- return
- }
- m.a = a
- m.matchingBlocks = nil
- m.opCodes = nil
-}
-
-// Set the second sequence to be compared. The first sequence to be compared is
-// not changed.
-func (m *SequenceMatcher) SetSeq2(b []string) {
- if &b == &m.b {
- return
- }
- m.b = b
- m.matchingBlocks = nil
- m.opCodes = nil
- m.fullBCount = nil
- m.chainB()
-}
-
-func (m *SequenceMatcher) chainB() {
- // Populate line -> index mapping
- b2j := map[string][]int{}
- for i, s := range m.b {
- indices := b2j[s]
- indices = append(indices, i)
- b2j[s] = indices
- }
-
- // Purge junk elements
- m.bJunk = map[string]struct{}{}
- if m.IsJunk != nil {
- junk := m.bJunk
- for s := range b2j {
- if m.IsJunk(s) {
- junk[s] = struct{}{}
- }
- }
- for s := range junk {
- delete(b2j, s)
- }
- }
-
- // Purge remaining popular elements
- popular := map[string]struct{}{}
- n := len(m.b)
- if m.autoJunk && n >= 200 {
- ntest := n/100 + 1
- for s, indices := range b2j {
- if len(indices) > ntest {
- popular[s] = struct{}{}
- }
- }
- for s := range popular {
- delete(b2j, s)
- }
- }
- m.bPopular = popular
- m.b2j = b2j
-}
-
-func (m *SequenceMatcher) isBJunk(s string) bool {
- _, ok := m.bJunk[s]
- return ok
-}
-
-// Find longest matching block in a[alo:ahi] and b[blo:bhi].
-//
-// If IsJunk is not defined:
-//
-// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
-//
-// alo <= i <= i+k <= ahi
-// blo <= j <= j+k <= bhi
-//
-// and for all (i',j',k') meeting those conditions,
-//
-// k >= k'
-// i <= i'
-// and if i == i', j <= j'
-//
-// In other words, of all maximal matching blocks, return one that
-// starts earliest in a, and of all those maximal matching blocks that
-// start earliest in a, return the one that starts earliest in b.
-//
-// If IsJunk is defined, first the longest matching block is
-// determined as above, but with the additional restriction that no
-// junk element appears in the block. Then that block is extended as
-// far as possible by matching (only) junk elements on both sides. So
-// the resulting block never matches on junk except as identical junk
-// happens to be adjacent to an "interesting" match.
-//
-// If no blocks match, return (alo, blo, 0).
-func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
- // CAUTION: stripping common prefix or suffix would be incorrect.
- // E.g.,
- // ab
- // acab
- // Longest matching block is "ab", but if common prefix is
- // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
- // strip, so ends up claiming that ab is changed to acab by
- // inserting "ca" in the middle. That's minimal but unintuitive:
- // "it's obvious" that someone inserted "ac" at the front.
- // Windiff ends up at the same place as diff, but by pairing up
- // the unique 'b's and then matching the first two 'a's.
- besti, bestj, bestsize := alo, blo, 0
-
- // find longest junk-free match
- // during an iteration of the loop, j2len[j] = length of longest
- // junk-free match ending with a[i-1] and b[j]
- j2len := map[int]int{}
- for i := alo; i != ahi; i++ {
- // look at all instances of a[i] in b; note that because
- // b2j has no junk keys, the loop is skipped if a[i] is junk
- newj2len := map[int]int{}
- for _, j := range m.b2j[m.a[i]] {
- // a[i] matches b[j]
- if j < blo {
- continue
- }
- if j >= bhi {
- break
- }
- k := j2len[j-1] + 1
- newj2len[j] = k
- if k > bestsize {
- besti, bestj, bestsize = i-k+1, j-k+1, k
- }
- }
- j2len = newj2len
- }
-
- // Extend the best by non-junk elements on each end. In particular,
- // "popular" non-junk elements aren't in b2j, which greatly speeds
- // the inner loop above, but also means "the best" match so far
- // doesn't contain any junk *or* popular non-junk elements.
- for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
- m.a[besti-1] == m.b[bestj-1] {
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- }
- for besti+bestsize < ahi && bestj+bestsize < bhi &&
- !m.isBJunk(m.b[bestj+bestsize]) &&
- m.a[besti+bestsize] == m.b[bestj+bestsize] {
- bestsize++
- }
-
- // Now that we have a wholly interesting match (albeit possibly
- // empty!), we may as well suck up the matching junk on each
- // side of it too. Can't think of a good reason not to, and it
- // saves post-processing the (possibly considerable) expense of
- // figuring out what to do with it. In the case of an empty
- // interesting match, this is clearly the right thing to do,
- // because no other kind of match is possible in the regions.
- for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
- m.a[besti-1] == m.b[bestj-1] {
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- }
- for besti+bestsize < ahi && bestj+bestsize < bhi &&
- m.isBJunk(m.b[bestj+bestsize]) &&
- m.a[besti+bestsize] == m.b[bestj+bestsize] {
- bestsize++
- }
-
- return Match{A: besti, B: bestj, Size: bestsize}
-}
-
-// Return list of triples describing matching subsequences.
-//
-// Each triple is of the form (i, j, n), and means that
-// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
-// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
-// adjacent triples in the list, and the second is not the last triple in the
-// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
-// adjacent equal blocks.
-//
-// The last triple is a dummy, (len(a), len(b), 0), and is the only
-// triple with n==0.
-func (m *SequenceMatcher) GetMatchingBlocks() []Match {
- if m.matchingBlocks != nil {
- return m.matchingBlocks
- }
-
- var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
- matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
- match := m.findLongestMatch(alo, ahi, blo, bhi)
- i, j, k := match.A, match.B, match.Size
- if match.Size > 0 {
- if alo < i && blo < j {
- matched = matchBlocks(alo, i, blo, j, matched)
- }
- matched = append(matched, match)
- if i+k < ahi && j+k < bhi {
- matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
- }
- }
- return matched
- }
- matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
-
- // It's possible that we have adjacent equal blocks in the
- // matching_blocks list now.
- nonAdjacent := []Match{}
- i1, j1, k1 := 0, 0, 0
- for _, b := range matched {
- // Is this block adjacent to i1, j1, k1?
- i2, j2, k2 := b.A, b.B, b.Size
- if i1+k1 == i2 && j1+k1 == j2 {
- // Yes, so collapse them -- this just increases the length of
- // the first block by the length of the second, and the first
- // block so lengthened remains the block to compare against.
- k1 += k2
- } else {
- // Not adjacent. Remember the first block (k1==0 means it's
- // the dummy we started with), and make the second block the
- // new block to compare against.
- if k1 > 0 {
- nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
- }
- i1, j1, k1 = i2, j2, k2
- }
- }
- if k1 > 0 {
- nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
- }
-
- nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
- m.matchingBlocks = nonAdjacent
- return m.matchingBlocks
-}
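
A short sketch of the returned triples, under the same package assumption and reusing the imports from the sketch above:

    a := []string{"one", "two", "three"}
    b := []string{"one", "2", "three"}
    for _, blk := range difflib.NewMatcher(a, b).GetMatchingBlocks() {
        fmt.Printf("a[%d:%d] == b[%d:%d]\n", blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size)
    }
    // a[0:1] == b[0:1]
    // a[2:3] == b[2:3]
    // a[3:3] == b[3:3]  (the size-0 sentinel)
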
-
-// Return list of 5-tuples describing how to turn a into b.
-//
-// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
-// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
-// tuple preceding it, and likewise for j1 == the previous j2.
-//
-// The tags are characters, with these meanings:
-//
-// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
-//
-// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
-//
-// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
-//
-// 'e' (equal): a[i1:i2] == b[j1:j2]
-func (m *SequenceMatcher) GetOpCodes() []OpCode {
- if m.opCodes != nil {
- return m.opCodes
- }
- i, j := 0, 0
- matching := m.GetMatchingBlocks()
- opCodes := make([]OpCode, 0, len(matching))
- for _, m := range matching {
- // invariant: we've pumped out correct diffs to change
- // a[:i] into b[:j], and the next matching block is
- // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
- // out a diff to change a[i:ai] into b[j:bj], pump out
- // the matching block, and move (i,j) beyond the match
- ai, bj, size := m.A, m.B, m.Size
- tag := byte(0)
- if i < ai && j < bj {
- tag = 'r'
- } else if i < ai {
- tag = 'd'
- } else if j < bj {
- tag = 'i'
- }
- if tag > 0 {
- opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
- }
- i, j = ai+size, bj+size
- // the list of matching blocks is terminated by a
- // sentinel with size 0
- if size > 0 {
- opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
- }
- }
- m.opCodes = opCodes
- return m.opCodes
-}
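
Continuing the sketch above, the same pair of sequences run through GetOpCodes yields one tuple per region:

    for _, op := range difflib.NewMatcher(a, b).GetOpCodes() {
        fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
    }
    // e a[0:1] b[0:1]  ("one" unchanged)
    // r a[1:2] b[1:2]  ("two" replaced by "2")
    // e a[2:3] b[2:3]  ("three" unchanged)
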
-
-// Isolate change clusters by eliminating ranges with no changes.
-//
-// Return groups of opcodes with up to n lines of context.
-// Each group is in the same format as returned by GetOpCodes().
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
- if n < 0 {
- n = 3
- }
- codes := m.GetOpCodes()
- if len(codes) == 0 {
- codes = []OpCode{{'e', 0, 1, 0, 1}}
- }
- // Fixup leading and trailing groups if they show no changes.
- if codes[0].Tag == 'e' {
- c := codes[0]
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
- }
- if codes[len(codes)-1].Tag == 'e' {
- c := codes[len(codes)-1]
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
- }
- nn := n + n
- groups := [][]OpCode{}
- group := []OpCode{}
- for _, c := range codes {
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- // End the current group and start a new one whenever
- // there is a large range with no changes.
- if c.Tag == 'e' && i2-i1 > nn {
- group = append(group, OpCode{
- c.Tag, i1, min(i2, i1+n),
- j1, min(j2, j1+n),
- })
- groups = append(groups, group)
- group = []OpCode{}
- i1, j1 = max(i1, i2-n), max(j1, j2-n)
- }
- group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
- }
- if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
- groups = append(groups, group)
- }
- return groups
-}
-
-// Return a measure of the sequences' similarity (float in [0,1]).
-//
-// Where T is the total number of elements in both sequences, and
-// M is the number of matches, this is 2.0*M / T.
-// Note that this is 1 if the sequences are identical, and 0 if
-// they have nothing in common.
-//
-// .Ratio() is expensive to compute if you haven't already computed
-// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
-// want to try .QuickRatio() or .RealQuickRatio() first to get an
-// upper bound.
-func (m *SequenceMatcher) Ratio() float64 {
- matches := 0
- for _, m := range m.GetMatchingBlocks() {
- matches += m.Size
- }
- return calculateRatio(matches, len(m.a)+len(m.b))
-}
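
As a worked instance of the 2.0*M / T formula: for a = {"a", "b", "c"} and b = {"a", "b", "d"}, M = 2 matched elements and T = 6, so the ratio is 4/6 ≈ 0.667. In code (same assumed package):

    r := difflib.NewMatcher([]string{"a", "b", "c"}, []string{"a", "b", "d"}).Ratio()
    fmt.Printf("%.3f\n", r) // 0.667
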
-
-// Return an upper bound on ratio() relatively quickly.
-//
-// This isn't defined beyond that it is an upper bound on .Ratio(), and
-// is faster to compute.
-func (m *SequenceMatcher) QuickRatio() float64 {
- // viewing a and b as multisets, set matches to the cardinality
- // of their intersection; this counts the number of matches
- // without regard to order, so is clearly an upper bound
- if m.fullBCount == nil {
- m.fullBCount = map[string]int{}
- for _, s := range m.b {
- m.fullBCount[s]++
- }
- }
-
- // avail[x] is the number of times x appears in 'b' less the
- // number of times we've seen it in 'a' so far ... kinda
- avail := map[string]int{}
- matches := 0
- for _, s := range m.a {
- n, ok := avail[s]
- if !ok {
- n = m.fullBCount[s]
- }
- avail[s] = n - 1
- if n > 0 {
- matches++
- }
- }
- return calculateRatio(matches, len(m.a)+len(m.b))
-}
-
-// Return an upper bound on ratio() very quickly.
-//
-// This isn't defined beyond that it is an upper bound on .Ratio(), and
-// is faster to compute than either .Ratio() or .QuickRatio().
-func (m *SequenceMatcher) RealQuickRatio() float64 {
- la, lb := len(m.a), len(m.b)
- return calculateRatio(min(la, lb), la+lb)
-}
-
-// Convert range to the "ed" format
-func formatRangeUnified(start, stop int) string {
- // Per the diff spec at http://www.unix.org/single_unix_specification/
- beginning := start + 1 // lines start numbering with one
- length := stop - start
- if length == 1 {
- return fmt.Sprintf("%d", beginning)
- }
- if length == 0 {
- beginning-- // empty ranges begin at line just before the range
- }
- return fmt.Sprintf("%d,%d", beginning, length)
-}
-
-// Unified diff parameters
-type UnifiedDiff struct {
- A []string // First sequence lines
- FromFile string // First file name
- FromDate string // First file time
- B []string // Second sequence lines
- ToFile string // Second file name
- ToDate string // Second file time
- Eol string // Headers end of line, defaults to LF
- Context int // Number of context lines
-}
-
-// Compare two sequences of lines; generate the delta as a unified diff.
-//
-// Unified diffs are a compact way of showing line changes and a few
-// lines of context. The number of context lines is set by 'n' which
-// defaults to three.
-//
-// By default, the diff control lines (those with ---, +++, or @@) are
-// created with a trailing newline. This is helpful so that inputs
-// created from file.readlines() result in diffs that are suitable for
-// file.writelines() since both the inputs and outputs have trailing
-// newlines.
-//
-// For inputs that do not have trailing newlines, set the lineterm
-// argument to "" so that the output will be uniformly newline free.
-//
-// The unidiff format normally has a header for filenames and modification
-// times. Any or all of these may be specified using strings for
-// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
-// The modification times are normally expressed in the ISO 8601 format.
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
- buf := bufio.NewWriter(writer)
- defer buf.Flush()
- wf := func(format string, args ...interface{}) error {
- _, err := buf.WriteString(fmt.Sprintf(format, args...))
- return err
- }
- ws := func(s string) error {
- _, err := buf.WriteString(s)
- return err
- }
-
- if len(diff.Eol) == 0 {
- diff.Eol = "\n"
- }
-
- started := false
- m := NewMatcher(diff.A, diff.B)
- for _, g := range m.GetGroupedOpCodes(diff.Context) {
- if !started {
- started = true
- fromDate := ""
- if len(diff.FromDate) > 0 {
- fromDate = "\t" + diff.FromDate
- }
- toDate := ""
- if len(diff.ToDate) > 0 {
- toDate = "\t" + diff.ToDate
- }
- if diff.FromFile != "" || diff.ToFile != "" {
- err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
- if err != nil {
- return err
- }
- err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
- if err != nil {
- return err
- }
- }
- }
- first, last := g[0], g[len(g)-1]
- range1 := formatRangeUnified(first.I1, last.I2)
- range2 := formatRangeUnified(first.J1, last.J2)
- if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
- return err
- }
- for _, c := range g {
- i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
- if c.Tag == 'e' {
- for _, line := range diff.A[i1:i2] {
- if err := ws(" " + line); err != nil {
- return err
- }
- }
- continue
- }
- if c.Tag == 'r' || c.Tag == 'd' {
- for _, line := range diff.A[i1:i2] {
- if err := ws("-" + line); err != nil {
- return err
- }
- }
- }
- if c.Tag == 'r' || c.Tag == 'i' {
- for _, line := range diff.B[j1:j2] {
- if err := ws("+" + line); err != nil {
- return err
- }
- }
- }
- }
- }
- return nil
-}
-
-// Like WriteUnifiedDiff but returns the diff as a string.
-func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
- w := &bytes.Buffer{}
- err := WriteUnifiedDiff(w, diff)
- return w.String(), err
-}
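
A typical end-to-end use, again under the pmezard/go-difflib assumption (file names and contents are illustrative):

    text, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
        A:        difflib.SplitLines("foo\nbar\n"),
        B:        difflib.SplitLines("foo\nbaz\n"),
        FromFile: "a.txt",
        ToFile:   "b.txt",
        Context:  1,
    })
    if err != nil {
        panic(err)
    }
    fmt.Print(text)
    // --- a.txt
    // +++ b.txt
    // @@ -1,2 +1,2 @@
    //  foo
    // -bar
    // +baz
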
-
-// Split a string on "\n" while preserving the newlines. The output can be used
-// as input for UnifiedDiff and ContextDiff structures.
-func SplitLines(s string) []string {
- lines := strings.SplitAfter(s, "\n")
- lines[len(lines)-1] += "\n"
- return lines
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
deleted file mode 100644
index 723b45d644..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import "regexp"
-
-type GoCollectorRule struct {
- Matcher *regexp.Regexp
- Deny bool
-}
-
-// GoCollectorOptions should not be used directly by anything except the
-// `collectors` package. Use it via the collectors package instead. See issue
-// https://github.com/prometheus/client_golang/issues/1030.
-//
-// This is internal, so external users can only use it via the `collector.WithGoCollector*` methods.
-type GoCollectorOptions struct {
- DisableMemStatsLikeMetrics bool
- RuntimeMetricSumForHist map[string]string
- RuntimeMetricRules []GoCollectorRule
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
deleted file mode 100644
index 97d17d6cb6..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.17
-// +build go1.17
-
-package internal
-
-import (
- "math"
- "path"
- "runtime/metrics"
- "strings"
-
- "github.com/prometheus/common/model"
-)
-
-// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
-// metric description and validates whether the metric is suitable for integration
-// with Prometheus.
-//
-// Returns false if a name could not be produced, or if Prometheus does not understand
-// the runtime/metrics Kind.
-//
-// Note that the main reason a name couldn't be produced is if the runtime/metrics
-// package exports a name with characters outside the valid Prometheus metric name
-// character set. This is theoretically possible, but should never happen in practice.
-// Still, don't rely on it.
-func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
- namespace := "go"
-
- comp := strings.SplitN(d.Name, ":", 2)
- key := comp[0]
- unit := comp[1]
-
- // The last path element in the key is the name,
- // the rest is the subsystem.
- subsystem := path.Dir(key[1:] /* remove leading / */)
- name := path.Base(key)
-
- // subsystem is translated by replacing all / and - with _.
- subsystem = strings.ReplaceAll(subsystem, "/", "_")
- subsystem = strings.ReplaceAll(subsystem, "-", "_")
-
- // unit is translated assuming that the unit contains no
- // non-ASCII characters.
- unit = strings.ReplaceAll(unit, "-", "_")
- unit = strings.ReplaceAll(unit, "*", "_")
- unit = strings.ReplaceAll(unit, "/", "_per_")
-
- // name has - replaced with _ and is concatenated with the unit and
- // other data.
- name = strings.ReplaceAll(name, "-", "_")
- name += "_" + unit
- if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
- name += "_total"
- }
-
- valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
- switch d.Kind {
- case metrics.KindUint64:
- case metrics.KindFloat64:
- case metrics.KindFloat64Histogram:
- default:
- valid = false
- }
- return namespace, subsystem, name, valid
-}
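
Because this lives in an internal package, it can only be exercised from within client_golang; a rough in-package sketch (assuming fmt and runtime/metrics are imported, with the expected translation shown as a comment derived from the rules above):

    for _, d := range metrics.All() {
        ns, sub, name, ok := RuntimeMetricsToProm(&d)
        if !ok {
            continue
        }
        // e.g. "/gc/heap/allocs:bytes" yields ("go", "gc_heap", "allocs_bytes_total")
        fmt.Println(ns, sub, name)
    }
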
-
-// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
-// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
-// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
-// as the bottom-most upper-bound inclusive bucket in Prometheus.
-func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
- switch unit {
- case "bytes":
- // Re-bucket as powers of 2.
- return reBucketExp(buckets, 2)
- case "seconds":
- // Re-bucket as powers of 10 and then merge all buckets greater
- // than 1 second into the +Inf bucket.
- b := reBucketExp(buckets, 10)
- for i := range b {
- if b[i] <= 1 {
- continue
- }
- b[i] = math.Inf(1)
- b = b[:i+1]
- break
- }
- return b
- }
- return buckets
-}
-
-// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
-// downsamples the buckets to those a multiple of base apart. The end result
-// is a roughly exponential (in many cases, perfectly exponential) bucketing
-// scheme.
-func reBucketExp(buckets []float64, base float64) []float64 {
- bucket := buckets[0]
- var newBuckets []float64
- // We may see a -Inf here, in which case, add it and skip it
- // since we risk producing NaNs otherwise.
- //
- // We need to preserve -Inf values to maintain runtime/metrics
- // conventions. We'll strip it out later.
- if bucket == math.Inf(-1) {
- newBuckets = append(newBuckets, bucket)
- buckets = buckets[1:]
- bucket = buckets[0]
- }
- // From now on, bucket should always have a non-Inf value because
- // Infs are only ever at the ends of the bucket lists, so
- // arithmetic operations on it are non-NaN.
- for i := 1; i < len(buckets); i++ {
- if bucket >= 0 && buckets[i] < bucket*base {
- // The next bucket we want to include is at least bucket*base.
- continue
- } else if bucket < 0 && buckets[i] < bucket/base {
- // In this case the bucket we're targeting is negative, and since
- // we're ascending through buckets here, we need to divide to get
- // closer to zero exponentially.
- continue
- }
-		// The +Inf bucket will always be the last one, and we'll always
-		// end up including it here: +Inf is never less than bucket*base
-		// (or bucket/base), so the continue branches above can't skip it,
-		// and the trailing append emits it as the final boundary.
- newBuckets = append(newBuckets, bucket)
- bucket = buckets[i]
- }
- return append(newBuckets, bucket)
-}
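
A worked trace of the downsampling, as an in-package sketch (reBucketExp is unexported): with base 2, each kept lower bound is at least double the previous one.

    in := []float64{1, 2, 3, 4, 5, 6, 7, 8}
    fmt.Println(reBucketExp(in, 2)) // [1 2 4 8]
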
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
deleted file mode 100644
index 6515c11480..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "sort"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers.
-type LabelPairSorter []*dto.LabelPair
-
-func (s LabelPairSorter) Len() int {
- return len(s)
-}
-
-func (s LabelPairSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s LabelPairSorter) Less(i, j int) bool {
- return s[i].GetName() < s[j].GetName()
-}
-
-// MetricSorter is a sortable slice of *dto.Metric.
-type MetricSorter []*dto.Metric
-
-func (s MetricSorter) Len() int {
- return len(s)
-}
-
-func (s MetricSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s MetricSorter) Less(i, j int) bool {
- if len(s[i].Label) != len(s[j].Label) {
- // This should not happen. The metrics are
-		// inconsistent. However, we have to deal with the fact that
- // people might use custom collectors or metric family injection
- // to create inconsistent metrics. So let's simply compare the
- // number of labels in this case. That will still yield
- // reproducible sorting.
- return len(s[i].Label) < len(s[j].Label)
- }
- for n, lp := range s[i].Label {
- vi := lp.GetValue()
- vj := s[j].Label[n].GetValue()
- if vi != vj {
- return vi < vj
- }
- }
-
- // We should never arrive here. Multiple metrics with the same
- // label set in the same scrape will lead to undefined ingestion
- // behavior. However, as above, we have to provide stable sorting
- // here, even for inconsistent metrics. So sort equal metrics
- // by their timestamp, with missing timestamps (implying "now")
- // coming last.
- if s[i].TimestampMs == nil {
- return false
- }
- if s[j].TimestampMs == nil {
- return true
- }
- return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
-
-// NormalizeMetricFamilies returns a MetricFamily slice with empty
-// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
-// the slice, with the contained Metrics sorted within each MetricFamily.
-func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
- for _, mf := range metricFamiliesByName {
- sort.Sort(MetricSorter(mf.Metric))
- }
- names := make([]string, 0, len(metricFamiliesByName))
- for name, mf := range metricFamiliesByName {
- if len(mf.Metric) > 0 {
- names = append(names, name)
- }
- }
- sort.Strings(names)
- result := make([]*dto.MetricFamily, 0, len(names))
- for _, name := range names {
- result = append(result, metricFamiliesByName[name])
- }
- return result
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
deleted file mode 100644
index c21911f292..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "strings"
- "unicode/utf8"
-
- "github.com/prometheus/common/model"
-)
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-//
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use case is the specification of constant label pairs in Opts or
-// when creating a Desc.
-type Labels map[string]string
-
-// LabelConstraint normalizes label values.
-type LabelConstraint func(string) string
-
-// ConstrainedLabel represents a label name and its constraint function
-// to normalize label values. This type is commonly used when constructing
-// metric vector Collectors.
-type ConstrainedLabel struct {
- Name string
- Constraint LabelConstraint
-}
-
-// ConstrainableLabels is an interface that allows the creation of labels that can
-// be optionally constrained.
-//
-// prometheus.V2().NewCounterVec(CounterVecOpts{
-// CounterOpts: {...}, // Usual CounterOpts fields
-// VariableLabels: ConstrainedLabels{
-// {Name: "A"},
-// {Name: "B", Constraint: func(v string) string { ... }},
-// },
-// })
-type ConstrainableLabels interface {
- compile() *compiledLabels
- labelNames() []string
-}
-
-// ConstrainedLabels represents a collection of label name -> constrain function
-// to normalize label values. This type is commonly used when constructing
-// metric vector Collectors.
-type ConstrainedLabels []ConstrainedLabel
-
-func (cls ConstrainedLabels) compile() *compiledLabels {
- compiled := &compiledLabels{
- names: make([]string, len(cls)),
- labelConstraints: map[string]LabelConstraint{},
- }
-
- for i, label := range cls {
- compiled.names[i] = label.Name
- if label.Constraint != nil {
- compiled.labelConstraints[label.Name] = label.Constraint
- }
- }
-
- return compiled
-}
-
-func (cls ConstrainedLabels) labelNames() []string {
- names := make([]string, len(cls))
- for i, label := range cls {
- names[i] = label.Name
- }
- return names
-}
-
-// UnconstrainedLabels represents a collection of labels without any constraint on
-// their value. Thus, it is simply a collection of label names.
-//
-// UnconstrainedLabels([]string{ "A", "B" })
-//
-// is equivalent to
-//
-// ConstrainedLabels {
-// { Name: "A" },
-// { Name: "B" },
-// }
-type UnconstrainedLabels []string
-
-func (uls UnconstrainedLabels) compile() *compiledLabels {
- return &compiledLabels{
- names: uls,
- }
-}
-
-func (uls UnconstrainedLabels) labelNames() []string {
- return uls
-}
-
-type compiledLabels struct {
- names []string
- labelConstraints map[string]LabelConstraint
-}
-
-func (cls *compiledLabels) compile() *compiledLabels {
- return cls
-}
-
-func (cls *compiledLabels) labelNames() []string {
- return cls.names
-}
-
-func (cls *compiledLabels) constrain(labelName, value string) string {
- if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
- return fn(value)
- }
- return value
-}
-
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-var errInconsistentCardinality = errors.New("inconsistent label cardinality")
-
-func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
- return fmt.Errorf(
- "%w: %q has %d variable labels named %q but %d values %q were provided",
- errInconsistentCardinality, fqName,
- len(labels), labels,
- len(labelValues), labelValues,
- )
-}
-
-func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
- if len(labels) != expectedNumberOfValues {
- return fmt.Errorf(
- "%w: expected %d label values but got %d in %#v",
- errInconsistentCardinality, expectedNumberOfValues,
- len(labels), labels,
- )
- }
-
- for name, val := range labels {
- if !utf8.ValidString(val) {
- return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
- }
- }
-
- return nil
-}
-
-func validateLabelValues(vals []string, expectedNumberOfValues int) error {
- if len(vals) != expectedNumberOfValues {
-		// The call below makes vals escape; copy them to avoid that.
- vals := append([]string(nil), vals...)
- return fmt.Errorf(
- "%w: expected %d label values but got %d in %#v",
- errInconsistentCardinality, expectedNumberOfValues,
- len(vals), vals,
- )
- }
-
- for _, val := range vals {
- if !utf8.ValidString(val) {
- return fmt.Errorf("label value %q is not valid UTF-8", val)
- }
- }
-
- return nil
-}
-
-func checkLabelName(l string) bool {
- return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
deleted file mode 100644
index f018e57237..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "math"
- "sort"
- "strings"
- "time"
-
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/model"
- "google.golang.org/protobuf/proto"
-)
-
-var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
-
-// A Metric models a single sample value with its meta data being exported to
-// Prometheus. Implementations of Metric in this package are Gauge, Counter,
-// Histogram, Summary, and Untyped.
-type Metric interface {
- // Desc returns the descriptor for the Metric. This method idempotently
- // returns the same descriptor throughout the lifetime of the
- // Metric. The returned descriptor is immutable by contract. A Metric
- // unable to describe itself must return an invalid descriptor (created
- // with NewInvalidDesc).
- Desc() *Desc
- // Write encodes the Metric into a "Metric" Protocol Buffer data
- // transmission object.
- //
- // Metric implementations must observe concurrency safety as reads of
- // this metric may occur at any time, and any blocking occurs at the
- // expense of total performance of rendering all registered
- // metrics. Ideally, Metric implementations should support concurrent
- // readers.
- //
- // While populating dto.Metric, it is the responsibility of the
- // implementation to ensure validity of the Metric protobuf (like valid
- // UTF-8 strings or syntactically valid metric and label names). It is
- // recommended to sort labels lexicographically. Callers of Write should
- // still make sure of sorting if they depend on it.
- Write(*dto.Metric) error
- // TODO(beorn7): The original rationale of passing in a pre-allocated
- // dto.Metric protobuf to save allocations has disappeared. The
- // signature of this method should be changed to "Write() (*dto.Metric,
- // error)".
-}
-
-// Opts bundles the options for creating most Metric types. Each metric
-// implementation XXX has its own XXXOpts type, but in most cases, it is just
-// an alias of this type (which might change when the requirement arises).
-//
-// It is mandatory to set Name to a non-empty string. All other fields are
-// optional and can safely be left at their zero value, although it is strongly
-// encouraged to set a Help string.
-type Opts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Metric (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the metric must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this metric.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
- ConstLabels Labels
-
-	// now is for testing purposes; by default it is time.Now.
- now func() time.Time
-}
-
-// BuildFQName joins the given three name components by "_". Empty name
-// components are ignored. If the name parameter itself is empty, an empty
-// string is returned, no matter what. Metric implementations included in this
-// library use this function internally to generate the fully-qualified metric
-// name from the name component in their Opts. Users of the library will only
-// need this function if they implement their own Metric or instantiate a Desc
-// (with NewDesc) directly.
-func BuildFQName(namespace, subsystem, name string) string {
- if name == "" {
- return ""
- }
- switch {
- case namespace != "" && subsystem != "":
- return strings.Join([]string{namespace, subsystem, name}, "_")
- case namespace != "":
- return strings.Join([]string{namespace, name}, "_")
- case subsystem != "":
- return strings.Join([]string{subsystem, name}, "_")
- }
- return name
-}
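
A few illustrative calls (assuming the package is imported as prometheus):

    prometheus.BuildFQName("http", "server", "requests_total") // "http_server_requests_total"
    prometheus.BuildFQName("", "", "up")                       // "up"
    prometheus.BuildFQName("ns", "sub", "")                    // "" (empty name wins)
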
-
-type invalidMetric struct {
- desc *Desc
- err error
-}
-
-// NewInvalidMetric returns a metric whose Write method always returns the
-// provided error. It is useful if a Collector finds itself unable to collect
-// a metric and wishes to report an error to the registry.
-func NewInvalidMetric(desc *Desc, err error) Metric {
- return &invalidMetric{desc, err}
-}
-
-func (m *invalidMetric) Desc() *Desc { return m.desc }
-
-func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
-
-type timestampedMetric struct {
- Metric
- t time.Time
-}
-
-func (m timestampedMetric) Write(pb *dto.Metric) error {
- e := m.Metric.Write(pb)
- pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
- return e
-}
-
-// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
-// way that it has an explicit timestamp set to the provided Time. This is only
-// useful in rare cases as the timestamp of a Prometheus metric should usually
-// be set by the Prometheus server during scraping. Exceptions include mirroring
-// metrics with given timestamps from other metric
-// sources.
-//
-// NewMetricWithTimestamp works best with MustNewConstMetric,
-// MustNewConstHistogram, and MustNewConstSummary, see example.
-//
-// Currently, the exposition formats used by Prometheus are limited to
-// millisecond resolution. Thus, the provided time will be rounded down to the
-// next full millisecond value.
-func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
- return timestampedMetric{Metric: m, t: t}
-}
-
-type withExemplarsMetric struct {
- Metric
-
- exemplars []*dto.Exemplar
-}
-
-func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
- if err := m.Metric.Write(pb); err != nil {
- return err
- }
-
- switch {
- case pb.Counter != nil:
- pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
- case pb.Histogram != nil:
- for _, e := range m.exemplars {
- // pb.Histogram.Bucket are sorted by UpperBound.
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
- return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
- })
- if i < len(pb.Histogram.Bucket) {
- pb.Histogram.Bucket[i].Exemplar = e
- } else {
- // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
- b := &dto.Bucket{
- CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
- UpperBound: proto.Float64(math.Inf(1)),
- Exemplar: e,
- }
- pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
- }
- }
- default:
- // TODO(bwplotka): Implement Gauge?
- return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
- }
-
- return nil
-}
-
-// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
-type Exemplar struct {
- Value float64
- Labels Labels
- // Optional.
-	// The default value (time.Time{}) indicates it is empty, which should be
-	// understood as time.Now() at the moment the metric was created.
- Timestamp time.Time
-}
-
-// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
-// exemplars. Exemplars are validated.
-//
-// Only the last applicable exemplar from the list is injected.
-// For example, for a Counter this means the last exemplar is injected.
-// For Histogram, it means last applicable exemplar for each bucket is injected.
-//
-// NewMetricWithExemplars works best with MustNewConstMetric and
-// MustNewConstHistogram, see example.
-func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
- if len(exemplars) == 0 {
- return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
- }
-
- var (
- now = time.Now()
- exs = make([]*dto.Exemplar, len(exemplars))
- err error
- )
- for i, e := range exemplars {
- ts := e.Timestamp
- if ts == (time.Time{}) {
- ts = now
- }
- exs[i], err = newExemplar(e.Value, ts, e.Labels)
- if err != nil {
- return nil, err
- }
- }
-
- return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
-}
-
-// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
-// NewMetricWithExemplars would have returned an error.
-func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
- ret, err := NewMetricWithExemplars(m, exemplars...)
- if err != nil {
- panic(err)
- }
- return ret
-}
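
A minimal sketch of attaching an exemplar to a const counter; the metric name and the trace label are assumptions for illustration only:

    desc := prometheus.NewDesc("demo_requests_total", "Total demo requests.", nil, nil)
    counter := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42)
    withExemplar := prometheus.MustNewMetricWithExemplars(counter, prometheus.Exemplar{
        Value:  42,
        Labels: prometheus.Labels{"trace_id": "abc123"}, // hypothetical trace ID
    })
    _ = withExemplar // typically sent on the channel in a custom Collector's Collect
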
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
deleted file mode 100644
index 7c12b21087..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !js || wasm
-// +build !js wasm
-
-package prometheus
-
-import "runtime"
-
-// getRuntimeNumThreads returns the number of open OS threads.
-func getRuntimeNumThreads() float64 {
- n, _ := runtime.ThreadCreateProfile(nil)
- return float64(n)
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
deleted file mode 100644
index 7348df01df..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build js && !wasm
-// +build js,!wasm
-
-package prometheus
-
-// getRuntimeNumThreads returns the number of open OS threads.
-func getRuntimeNumThreads() float64 {
- return 1
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
deleted file mode 100644
index 03773b21f7..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Observer is the interface that wraps the Observe method, which is used by
-// Histogram and Summary to add observations.
-type Observer interface {
- Observe(float64)
-}
-
-// The ObserverFunc type is an adapter to allow the use of ordinary
-// functions as Observers. If f is a function with the appropriate
-// signature, ObserverFunc(f) is an Observer that calls f.
-//
-// This adapter is usually used in connection with the Timer type, and there are
-// two general use cases:
-//
-// The most common one is to use a Gauge as the Observer for a Timer.
-// See the "Gauge" Timer example.
-//
-// The more advanced use case is to create a function that dynamically decides
-// which Observer to use for observing the duration. See the "Complex" Timer
-// example.
-type ObserverFunc func(float64)
-
-// Observe calls f(value). It implements Observer.
-func (f ObserverFunc) Observe(value float64) {
- f(value)
-}
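
The Gauge-as-Observer pattern mentioned above, sketched with an illustrative metric name:

    gauge := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "last_backup_duration_seconds", // hypothetical
        Help: "Duration of the last backup run.",
    })
    timer := prometheus.NewTimer(prometheus.ObserverFunc(gauge.Set))
    defer timer.ObserveDuration() // sets the gauge to the elapsed seconds
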
-
-// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
-type ObserverVec interface {
- GetMetricWith(Labels) (Observer, error)
- GetMetricWithLabelValues(lvs ...string) (Observer, error)
- With(Labels) Observer
- WithLabelValues(...string) Observer
- CurryWith(Labels) (ObserverVec, error)
- MustCurryWith(Labels) ObserverVec
-
- Collector
-}
-
-// ExemplarObserver is implemented by Observers that offer the option of
-// observing a value together with an exemplar. Its ObserveWithExemplar method
-// works like the Observe method of an Observer but also replaces the currently
-// saved exemplar (if any) with a new one, created from the provided value, the
-// current time as timestamp, and the provided Labels. Empty Labels will lead to
-// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
-// left in place. ObserveWithExemplar panics if any of the provided labels are
-// invalid or if the provided labels contain more than 128 runes in total.
-type ExemplarObserver interface {
- ObserveWithExemplar(value float64, exemplar Labels)
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
deleted file mode 100644
index 8548dd18ed..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-type processCollector struct {
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- reportErrors bool
- cpuTotal *Desc
- openFDs, maxFDs *Desc
- vsize, maxVsize *Desc
- rss *Desc
- startTime *Desc
-}
-
-// ProcessCollectorOpts defines the behavior of a process metrics collector
-// created with NewProcessCollector.
-type ProcessCollectorOpts struct {
- // PidFn returns the PID of the process the collector collects metrics
- // for. It is called upon each collection. By default, the PID of the
- // current process is used, as determined on construction time by
- // calling os.Getpid().
- PidFn func() (int, error)
- // If non-empty, each of the collected metrics is prefixed by the
- // provided string and an underscore ("_").
- Namespace string
- // If true, any error encountered during collection is reported as an
- // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
- // and the collected metrics will be incomplete. (Possibly, no metrics
- // will be collected at all.) While that's usually not desired, it is
- // appropriate for the common "mix-in" of process metrics, where process
- // metrics are nice to have, but failing to collect them should not
- // disrupt the collection of the remaining metrics.
- ReportErrors bool
-}
-
-// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewProcessCollector instead.
-func NewProcessCollector(opts ProcessCollectorOpts) Collector {
- ns := ""
- if len(opts.Namespace) > 0 {
- ns = opts.Namespace + "_"
- }
-
- c := &processCollector{
- reportErrors: opts.ReportErrors,
- cpuTotal: NewDesc(
- ns+"process_cpu_seconds_total",
- "Total user and system CPU time spent in seconds.",
- nil, nil,
- ),
- openFDs: NewDesc(
- ns+"process_open_fds",
- "Number of open file descriptors.",
- nil, nil,
- ),
- maxFDs: NewDesc(
- ns+"process_max_fds",
- "Maximum number of open file descriptors.",
- nil, nil,
- ),
- vsize: NewDesc(
- ns+"process_virtual_memory_bytes",
- "Virtual memory size in bytes.",
- nil, nil,
- ),
- maxVsize: NewDesc(
- ns+"process_virtual_memory_max_bytes",
- "Maximum amount of virtual memory available in bytes.",
- nil, nil,
- ),
- rss: NewDesc(
- ns+"process_resident_memory_bytes",
- "Resident memory size in bytes.",
- nil, nil,
- ),
- startTime: NewDesc(
- ns+"process_start_time_seconds",
- "Start time of the process since unix epoch in seconds.",
- nil, nil,
- ),
- }
-
- if opts.PidFn == nil {
- c.pidFn = getPIDFn()
- } else {
- c.pidFn = opts.PidFn
- }
-
- // Set up process metric collection if supported by the runtime.
- if canCollectProcess() {
- c.collectFn = c.processCollect
- } else {
- c.collectFn = func(ch chan<- Metric) {
- c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
- }
- }
-
- return c
-}
-
-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal
- ch <- c.openFDs
- ch <- c.maxFDs
- ch <- c.vsize
- ch <- c.maxVsize
- ch <- c.rss
- ch <- c.startTime
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *processCollector) Collect(ch chan<- Metric) {
- c.collectFn(ch)
-}
-
-func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
- if !c.reportErrors {
- return
- }
- if desc == nil {
- desc = NewInvalidDesc(err)
- }
- ch <- NewInvalidMetric(desc, err)
-}
-
-// NewPidFileFn returns a function that retrieves a pid from the specified file.
-// It is meant to be used for the PidFn field in ProcessCollectorOpts.
-func NewPidFileFn(pidFilePath string) func() (int, error) {
- return func() (int, error) {
- content, err := os.ReadFile(pidFilePath)
- if err != nil {
- return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
- }
- pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
- if err != nil {
- return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
- }
-
- return pid, nil
- }
-}
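
A sketch of wiring NewPidFileFn into the options (the pid file path and namespace are hypothetical; new code should prefer collectors.NewProcessCollector, per the deprecation note above):

    c := prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
        PidFn:        prometheus.NewPidFileFn("/var/run/myapp.pid"), // hypothetical path
        Namespace:    "myapp",                                       // hypothetical
        ReportErrors: true,
    })
    prometheus.MustRegister(c)
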
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
deleted file mode 100644
index b1e363d6cf..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build js
-// +build js
-
-package prometheus
-
-func canCollectProcess() bool {
- return false
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
-	// noop on this platform
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
deleted file mode 100644
index 8c1136ceea..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows && !js && !wasip1
-// +build !windows,!js,!wasip1
-
-package prometheus
-
-import (
- "github.com/prometheus/procfs"
-)
-
-func canCollectProcess() bool {
- _, err := procfs.NewDefaultFS()
- return err == nil
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
- pid, err := c.pidFn()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- p, err := procfs.NewProc(pid)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- if stat, err := p.Stat(); err == nil {
- ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
- ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
- ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
- if startTime, err := stat.StartTime(); err == nil {
- ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
- } else {
- c.reportError(ch, c.startTime, err)
- }
- } else {
- c.reportError(ch, nil, err)
- }
-
- if fds, err := p.FileDescriptorsLen(); err == nil {
- ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
- } else {
- c.reportError(ch, c.openFDs, err)
- }
-
- if limits, err := p.Limits(); err == nil {
- ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
- ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
- } else {
- c.reportError(ch, nil, err)
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
deleted file mode 100644
index d8d9a6d7a2..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build wasip1
-// +build wasip1
-
-package prometheus
-
-func canCollectProcess() bool {
- return false
-}
-
-func (*processCollector) processCollect(chan<- Metric) {
-	// noop on this platform
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
deleted file mode 100644
index f973398df2..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-func canCollectProcess() bool {
- return true
-}
-
-var (
- modpsapi = syscall.NewLazyDLL("psapi.dll")
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
-
- procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
- procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
-)
-
-type processMemoryCounters struct {
- // System interface description
- // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
-
- // Refer to the Golang internal implementation
- // https://golang.org/src/internal/syscall/windows/psapi_windows.go
- _ uint32
- PageFaultCount uint32
- PeakWorkingSetSize uintptr
- WorkingSetSize uintptr
- QuotaPeakPagedPoolUsage uintptr
- QuotaPagedPoolUsage uintptr
- QuotaPeakNonPagedPoolUsage uintptr
- QuotaNonPagedPoolUsage uintptr
- PagefileUsage uintptr
- PeakPagefileUsage uintptr
- PrivateUsage uintptr
-}
-
-func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
- mem := processMemoryCounters{}
- r1, _, err := procGetProcessMemoryInfo.Call(
- uintptr(handle),
- uintptr(unsafe.Pointer(&mem)),
- uintptr(unsafe.Sizeof(mem)),
- )
- if r1 != 1 {
- return mem, err
- } else {
- return mem, nil
- }
-}
-
-func getProcessHandleCount(handle windows.Handle) (uint32, error) {
- var count uint32
- r1, _, err := procGetProcessHandleCount.Call(
- uintptr(handle),
- uintptr(unsafe.Pointer(&count)),
- )
- if r1 != 1 {
- return 0, err
- } else {
- return count, nil
- }
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
- h, err := windows.GetCurrentProcess()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- var startTime, exitTime, kernelTime, userTime windows.Filetime
- err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
- ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
-
- mem, err := getProcessMemoryInfo(h)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
- ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
-
- handles, err := getProcessHandleCount(h)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
- ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
- ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
-}
-
-func fileTimeToSeconds(ft windows.Filetime) float64 {
- return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
-}
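Aside on the conversion above: a Windows FILETIME is a 64-bit count of 100-nanosecond ticks, so dividing the reassembled value by 1e7 yields seconds. Below is a minimal, platform-independent sketch of the same arithmetic; the local filetime type is a stand-in for windows.Filetime, since golang.org/x/sys/windows only builds on Windows.

package main

import "fmt"

// filetime mirrors the layout of windows.Filetime (a stand-in so that this
// sketch also compiles off Windows): a 64-bit tick count split into two
// 32-bit halves, each tick being 100 ns.
type filetime struct {
	LowDateTime  uint32
	HighDateTime uint32
}

// seconds reassembles the 64-bit tick count and divides by 1e7, exactly
// as fileTimeToSeconds above does.
func (ft filetime) seconds() float64 {
	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
}

func main() {
	// 25,000,000 ticks of 100 ns each amount to 2.5 seconds of CPU time.
	fmt.Println(filetime{LowDateTime: 25_000_000}.seconds()) // 2.5
}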
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
deleted file mode 100644
index 9819917b83..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
-)
-
-const (
- closeNotifier = 1 << iota
- flusher
- hijacker
- readerFrom
- pusher
-)
-
-type delegator interface {
- http.ResponseWriter
-
- Status() int
- Written() int64
-}
-
-type responseWriterDelegator struct {
- http.ResponseWriter
-
- status int
- written int64
- wroteHeader bool
- observeWriteHeader func(int)
-}
-
-func (r *responseWriterDelegator) Status() int {
- return r.status
-}
-
-func (r *responseWriterDelegator) Written() int64 {
- return r.written
-}
-
-func (r *responseWriterDelegator) WriteHeader(code int) {
- if r.observeWriteHeader != nil && !r.wroteHeader {
- // Only call observeWriteHeader for the 1st time. It's a bug if
- // WriteHeader is called more than once, but we want to protect
- // against it here. Note that we still delegate the WriteHeader
- // to the original ResponseWriter to not mask the bug from it.
- r.observeWriteHeader(code)
- }
- r.status = code
- r.wroteHeader = true
- r.ResponseWriter.WriteHeader(code)
-}
-
-func (r *responseWriterDelegator) Write(b []byte) (int, error) {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- n, err := r.ResponseWriter.Write(b)
- r.written += int64(n)
- return n, err
-}
-
-type (
- closeNotifierDelegator struct{ *responseWriterDelegator }
- flusherDelegator struct{ *responseWriterDelegator }
- hijackerDelegator struct{ *responseWriterDelegator }
- readerFromDelegator struct{ *responseWriterDelegator }
- pusherDelegator struct{ *responseWriterDelegator }
-)
-
-func (d closeNotifierDelegator) CloseNotify() <-chan bool {
- //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
- return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
-}
-
-func (d flusherDelegator) Flush() {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !d.wroteHeader {
- d.WriteHeader(http.StatusOK)
- }
- d.ResponseWriter.(http.Flusher).Flush()
-}
-
-func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- return d.ResponseWriter.(http.Hijacker).Hijack()
-}
-
-func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
- // If applicable, call WriteHeader here so that observeWriteHeader is
- // handled appropriately.
- if !d.wroteHeader {
- d.WriteHeader(http.StatusOK)
- }
- n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
- d.written += n
- return n, err
-}
-
-func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
- return d.ResponseWriter.(http.Pusher).Push(target, opts)
-}
-
-var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
-
-func init() {
- // TODO(beorn7): Code generation would help here.
- pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
- return d
- }
- pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
- return closeNotifierDelegator{d}
- }
- pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
- return flusherDelegator{d}
- }
- pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
- return struct {
- *responseWriterDelegator
- http.Flusher
- http.CloseNotifier
- }{d, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
- return hijackerDelegator{d}
- }
- pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.CloseNotifier
- }{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.Flusher
- }{d, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
- return struct {
- *responseWriterDelegator
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
- return readerFromDelegator{d}
- }
- pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.CloseNotifier
- }{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Flusher
- }{d, readerFromDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Flusher
- http.CloseNotifier
- }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- }{d, readerFromDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.CloseNotifier
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
- return struct {
- *responseWriterDelegator
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
- return pusherDelegator{d}
- }
- pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Flusher
- }{d, pusherDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- }{d, pusherDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.CloseNotifier
- }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.Flusher
- }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
- return struct {
- *responseWriterDelegator
- http.Pusher
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- }{d, pusherDelegator{d}, readerFromDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Flusher
- }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
- }
- pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
- return struct {
- *responseWriterDelegator
- http.Pusher
- io.ReaderFrom
- http.Hijacker
- http.Flusher
- http.CloseNotifier
- }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
- }
-}
-
-func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
- d := &responseWriterDelegator{
- ResponseWriter: w,
- observeWriteHeader: observeWriteHeaderFunc,
- }
-
- id := 0
- //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
- if _, ok := w.(http.CloseNotifier); ok {
- id += closeNotifier
- }
- if _, ok := w.(http.Flusher); ok {
- id += flusher
- }
- if _, ok := w.(http.Hijacker); ok {
- id += hijacker
- }
- if _, ok := w.(io.ReaderFrom); ok {
- id += readerFrom
- }
- if _, ok := w.(http.Pusher); ok {
- id += pusher
- }
-
- return pickDelegator[id](d)
-}
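To make the bitmask above concrete: each optional interface contributes one power of two, so a ResponseWriter's capabilities map to an index between 0 and 31 into pickDelegator. The following self-contained sketch mirrors that probe with local copies of the unexported constants; it is an illustration, not the package's API.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// Local copies of the bit flags from delegator.go above.
const (
	closeNotifier = 1 << iota // 1
	flusher                   // 2
	hijacker                  // 4
	readerFrom                // 8
	pusher                    // 16
)

// combination computes the index newDelegator would use to pick one of
// the 32 precomputed delegator constructors.
func combination(w http.ResponseWriter) int {
	id := 0
	//nolint:staticcheck // http.CloseNotifier is deprecated; probed for parity.
	if _, ok := w.(http.CloseNotifier); ok {
		id += closeNotifier
	}
	if _, ok := w.(http.Flusher); ok {
		id += flusher
	}
	if _, ok := w.(http.Hijacker); ok {
		id += hijacker
	}
	if _, ok := w.(io.ReaderFrom); ok {
		id += readerFrom
	}
	if _, ok := w.(http.Pusher); ok {
		id += pusher
	}
	return id
}

func main() {
	// httptest.ResponseRecorder implements http.Flusher and none of the
	// other four interfaces, so the expected index is 2.
	fmt.Println(combination(httptest.NewRecorder()))
}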
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
deleted file mode 100644
index 09b8d2fbea..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ /dev/null
@@ -1,408 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package promhttp provides tooling around HTTP servers and clients.
-//
-// First, the package allows the creation of http.Handler instances to expose
-// Prometheus metrics via HTTP. promhttp.Handler acts on the
-// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
-// custom registry or anything that implements the Gatherer interface. It also
-// allows the creation of handlers that act differently on errors or allow to
-// log errors.
-//
-// Second, the package provides tooling to instrument instances of http.Handler
-// via middleware. Middleware wrappers follow the naming scheme
-// InstrumentHandlerX, where X describes the intended use of the middleware.
-// See each function's doc comment for specific details.
-//
-// Finally, the package allows for an http.RoundTripper to be instrumented via
-// middleware. Middleware wrappers follow the naming scheme
-// InstrumentRoundTripperX, where X describes the intended use of the
-// middleware. See each function's doc comment for specific details.
-package promhttp
-
-import (
- "compress/gzip"
- "errors"
- "fmt"
- "io"
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/prometheus/common/expfmt"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-const (
- contentTypeHeader = "Content-Type"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
- processStartTimeHeader = "Process-Start-Time-Unix"
-)
-
-var gzipPool = sync.Pool{
- New: func() interface{} {
- return gzip.NewWriter(nil)
- },
-}
-
-// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
-// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
-// no error logging, and it applies compression if requested by the client.
-//
-// The returned http.Handler is already instrumented using the
-// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
-// create multiple http.Handlers by separate calls of the Handler function, the
-// metrics used for instrumentation will be shared between them, providing
-// global scrape counts.
-//
-// This function is meant to cover the bulk of basic use cases. If you are doing
-// anything that requires more customization (including using a non-default
-// Gatherer, different instrumentation, and non-default HandlerOpts), use the
-// HandlerFor function. See there for details.
-func Handler() http.Handler {
- return InstrumentMetricHandler(
- prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
- )
-}
-
-// HandlerFor returns an uninstrumented http.Handler for the provided
-// Gatherer. The behavior of the Handler is defined by the provided
-// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
-// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
-// instrumentation. Use the InstrumentMetricHandler function to apply the same
-// kind of instrumentation as it is used by the Handler function.
-func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
- return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
-}
-
-// HandlerForTransactional is like HandlerFor, but it uses a transactional
-// gatherer, which may safely change the returned *dto.MetricFamily in place
-// before a call to `Gather` and after the corresponding `done` of that
-// `Gather` has been called.
-func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
- var (
- inFlightSem chan struct{}
- errCnt = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "promhttp_metric_handler_errors_total",
- Help: "Total number of internal errors encountered by the promhttp metric handler.",
- },
- []string{"cause"},
- )
- )
-
- if opts.MaxRequestsInFlight > 0 {
- inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
- }
- if opts.Registry != nil {
- // Initialize all possibilities that can occur below.
- errCnt.WithLabelValues("gathering")
- errCnt.WithLabelValues("encoding")
- if err := opts.Registry.Register(errCnt); err != nil {
- are := &prometheus.AlreadyRegisteredError{}
- if errors.As(err, are) {
- errCnt = are.ExistingCollector.(*prometheus.CounterVec)
- } else {
- panic(err)
- }
- }
- }
-
- h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
- if !opts.ProcessStartTime.IsZero() {
- rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
- }
- if inFlightSem != nil {
- select {
- case inFlightSem <- struct{}{}: // All good, carry on.
- defer func() { <-inFlightSem }()
- default:
- http.Error(rsp, fmt.Sprintf(
- "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
- ), http.StatusServiceUnavailable)
- return
- }
- }
- mfs, done, err := reg.Gather()
- defer done()
- if err != nil {
- if opts.ErrorLog != nil {
- opts.ErrorLog.Println("error gathering metrics:", err)
- }
- errCnt.WithLabelValues("gathering").Inc()
- switch opts.ErrorHandling {
- case PanicOnError:
- panic(err)
- case ContinueOnError:
- if len(mfs) == 0 {
- // Still report the error if no metrics have been gathered.
- httpError(rsp, err)
- return
- }
- case HTTPErrorOnError:
- httpError(rsp, err)
- return
- }
- }
-
- var contentType expfmt.Format
- if opts.EnableOpenMetrics {
- contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
- } else {
- contentType = expfmt.Negotiate(req.Header)
- }
- header := rsp.Header()
- header.Set(contentTypeHeader, string(contentType))
-
- w := io.Writer(rsp)
- if !opts.DisableCompression && gzipAccepted(req.Header) {
- header.Set(contentEncodingHeader, "gzip")
- gz := gzipPool.Get().(*gzip.Writer)
- defer gzipPool.Put(gz)
-
- gz.Reset(w)
- defer gz.Close()
-
- w = gz
- }
-
- enc := expfmt.NewEncoder(w, contentType)
-
- // handleError handles the error according to opts.ErrorHandling
- // and returns true if we have to abort after the handling.
- handleError := func(err error) bool {
- if err == nil {
- return false
- }
- if opts.ErrorLog != nil {
- opts.ErrorLog.Println("error encoding and sending metric family:", err)
- }
- errCnt.WithLabelValues("encoding").Inc()
- switch opts.ErrorHandling {
- case PanicOnError:
- panic(err)
- case HTTPErrorOnError:
- // We cannot really send an HTTP error at this
- // point because we most likely have written
- // something to rsp already. But at least we can
- // stop sending.
- return true
- }
- // Do nothing in all other cases, including ContinueOnError.
- return false
- }
-
- for _, mf := range mfs {
- if handleError(enc.Encode(mf)) {
- return
- }
- }
- if closer, ok := enc.(expfmt.Closer); ok {
- // This in particular takes care of the final "# EOF\n" line for OpenMetrics.
- if handleError(closer.Close()) {
- return
- }
- }
- })
-
- if opts.Timeout <= 0 {
- return h
- }
- return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
- "Exceeded configured timeout of %v.\n",
- opts.Timeout,
- ))
-}
-
-// InstrumentMetricHandler is usually used with an http.Handler returned by the
-// HandlerFor function. It instruments the provided http.Handler with two
-// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
-// scrapes partitioned by HTTP status code, and a gauge
-// "promhttp_metric_handler_requests_in_flight" to track the number of
-// simultaneous scrapes. This function idempotently registers collectors for
-// both metrics with the provided Registerer. It panics if the registration
-// fails. The provided metrics are useful to see how many scrapes hit the
-// monitored target (which could be from different Prometheus servers or other
-// scrapers), and how often they overlap (which would result in more than one
-// scrape in flight at the same time). Note that the scrapes-in-flight gauge
-// will contain the scrape by which it is exposed, while the scrape counter will
-// only get incremented after the scrape is complete (as only then the status
-// code is known). For tracking scrape durations, use the
-// "scrape_duration_seconds" gauge created by the Prometheus server upon each
-// scrape.
-func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
- cnt := prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "promhttp_metric_handler_requests_total",
- Help: "Total number of scrapes by HTTP status code.",
- },
- []string{"code"},
- )
- // Initialize the most likely HTTP status codes.
- cnt.WithLabelValues("200")
- cnt.WithLabelValues("500")
- cnt.WithLabelValues("503")
- if err := reg.Register(cnt); err != nil {
- are := &prometheus.AlreadyRegisteredError{}
- if errors.As(err, are) {
- cnt = are.ExistingCollector.(*prometheus.CounterVec)
- } else {
- panic(err)
- }
- }
-
- gge := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "promhttp_metric_handler_requests_in_flight",
- Help: "Current number of scrapes being served.",
- })
- if err := reg.Register(gge); err != nil {
- are := &prometheus.AlreadyRegisteredError{}
- if errors.As(err, are) {
- gge = are.ExistingCollector.(prometheus.Gauge)
- } else {
- panic(err)
- }
- }
-
- return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
-}
-
-// HandlerErrorHandling defines how a Handler serving metrics will handle
-// errors.
-type HandlerErrorHandling int
-
-// These constants cause handlers serving metrics to behave as described if
-// errors are encountered.
-const (
- // Serve an HTTP status code 500 upon the first error
- // encountered. Report the error message in the body. Note that HTTP
- // errors cannot be served anymore once the beginning of a regular
- // payload has been sent. Thus, in the (unlikely) case that encoding the
- // payload into the negotiated wire format fails, serving the response
- // will simply be aborted. Set an ErrorLog in HandlerOpts to detect
- // those errors.
- HTTPErrorOnError HandlerErrorHandling = iota
- // Ignore errors and try to serve as many metrics as possible. However,
- // if no metrics can be served, serve an HTTP status code 500 and the
- // last error message in the body. Only use this in deliberate "best
- // effort" metrics collection scenarios. In this case, it is highly
- // recommended to provide other means of detecting errors: By setting an
- // ErrorLog in HandlerOpts, the errors are logged. By providing a
- // Registry in HandlerOpts, the exposed metrics include an error counter
- // "promhttp_metric_handler_errors_total", which can be used for
- // alerts.
- ContinueOnError
- // Panic upon the first error encountered (useful for "crash only" apps).
- PanicOnError
-)
-
-// Logger is the minimal interface HandlerOpts needs for logging. Note that
-// log.Logger from the standard library implements this interface, and it is
-// easy for custom loggers to implement, if they don't do so already.
-type Logger interface {
- Println(v ...interface{})
-}
-
-// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
-// zero value of HandlerOpts is a reasonable default.
-type HandlerOpts struct {
- // ErrorLog specifies an optional Logger for errors collecting and
- // serving metrics. If nil, errors are not logged at all. Note that the
- // type of a reported error is often prometheus.MultiError, which
- // formats into a multi-line error string. If you want to avoid the
- // latter, create a Logger implementation that detects a
- // prometheus.MultiError and formats the contained errors into one line.
- ErrorLog Logger
- // ErrorHandling defines how errors are handled. Note that errors are
- // logged regardless of the configured ErrorHandling, provided that
- // ErrorLog is not nil.
- ErrorHandling HandlerErrorHandling
- // If Registry is not nil, it is used to register a metric
- // "promhttp_metric_handler_errors_total", partitioned by "cause". A
- // failed registration causes a panic. Note that this error counter is
- // different from the instrumentation you get from the various
- // InstrumentHandler... helpers. It counts errors that don't necessarily
- // result in a non-2xx HTTP status code. There are two typical cases:
- // (1) Encoding errors that only happen after streaming of the HTTP body
- // has already started (and the status code 200 has been sent). This
- // should only happen with custom collectors. (2) Collection errors with
- // no effect on the HTTP status code because ErrorHandling is set to
- // ContinueOnError.
- Registry prometheus.Registerer
- // If DisableCompression is true, the handler will never compress the
- // response, even if requested by the client.
- DisableCompression bool
- // The number of concurrent HTTP requests is limited to
- // MaxRequestsInFlight. Additional requests are responded to with 503
- // Service Unavailable and a suitable message in the body. If
- // MaxRequestsInFlight is 0 or negative, no limit is applied.
- MaxRequestsInFlight int
- // If handling a request takes longer than Timeout, it is responded to
- // with 503 Service Unavailable and a suitable message. No timeout is
- // applied if Timeout is 0 or negative. Note that with the current
- // implementation, reaching the timeout simply ends the HTTP request as
- // described above (and even that only if sending of the body hasn't
- // started yet), while the bulk work of gathering all the metrics keeps
- // running in the background (with the eventual result to be thrown
- // away). Until the implementation is improved, it is recommended to
- // implement a separate timeout in potentially slow Collectors.
- Timeout time.Duration
- // If true, the experimental OpenMetrics encoding is added to the
- // possible options during content negotiation. Note that Prometheus
- // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
- // the only way to transmit exemplars. However, the move to OpenMetrics
- // is not completely transparent. Most notably, the values of "quantile"
- // labels of Summaries and "le" labels of Histograms are formatted with
- // a trailing ".0" if they would otherwise look like integer numbers
- // (which changes the identity of the resulting series on the Prometheus
- // server).
- EnableOpenMetrics bool
- // ProcessStartTime allows setting the process start time value that will
- // be exposed with the "Process-Start-Time-Unix" response header along with
- // the metrics payload. This allows callers to have efficient
- // transformations to cumulative counters (e.g. OpenTelemetry) or generally
- // _created timestamp estimation per scrape target.
- // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
- // exposition format.
- ProcessStartTime time.Time
-}
-
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
- a := header.Get(acceptEncodingHeader)
- parts := strings.Split(a, ",")
- for _, part := range parts {
- part = strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return true
- }
- }
- return false
-}
-
-// httpError removes any content-encoding header and then calls http.Error with
-// the provided error and http.StatusInternalServerError. The error contents are
-// supposed to be uncompressed plain text. Same as with a plain http.Error, this
-// must not be called if the header or any payload has already been sent.
-func httpError(rsp http.ResponseWriter, err error) {
- rsp.Header().Del(contentEncodingHeader)
- http.Error(
- rsp,
- "An error has occurred while serving metrics:\n\n"+err.Error(),
- http.StatusInternalServerError,
- )
-}
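A usage sketch for the handler above, wiring HandlerFor to a custom registry with non-default HandlerOpts; Handler() is the one-liner equivalent acting on the default gatherer. This assumes the collectors subpackage shipped with this version of client_golang; the metric wiring is illustrative.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry instead of prometheus.DefaultGatherer.
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:            log.Default(), // *log.Logger satisfies the Logger interface.
		ErrorHandling:       promhttp.ContinueOnError,
		Registry:            reg, // Exposes promhttp_metric_handler_errors_total.
		MaxRequestsInFlight: 3,
		Timeout:             10 * time.Second,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}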
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
deleted file mode 100644
index d3482c40ca..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "crypto/tls"
- "net/http"
- "net/http/httptrace"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// The RoundTripperFunc type is an adapter to allow the use of ordinary
-// functions as RoundTrippers. If f is a function with the appropriate
-// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
-type RoundTripperFunc func(req *http.Request) (*http.Response, error)
-
-// RoundTrip implements the RoundTripper interface.
-func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
- return rt(r)
-}
-
-// InstrumentRoundTripperInFlight is a middleware that wraps the provided
-// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
-// requests currently handled by the wrapped http.RoundTripper.
-//
-// See ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
- return func(r *http.Request) (*http.Response, error) {
- gauge.Inc()
- defer gauge.Dec()
- return next.RoundTrip(r)
- }
-}
-
-// InstrumentRoundTripperCounter is a middleware that wraps the provided
-// http.RoundTripper to observe the request result with the provided CounterVec.
-// The CounterVec must have zero, one, or two non-const non-curried labels. For
-// those, the only allowed label names are "code" and "method". The function
-// panics otherwise. For the "method" label a predefined default label value set
-// is used to filter given values. Values besides predefined values will count
-// as `unknown` method. `WithExtraMethods` can be used to add more
-// methods to the set. Partitioning of the CounterVec happens by HTTP status code
-// and/or HTTP method if the respective instance label names are present in the
-// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
-//
-// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
-// is not incremented.
-//
-// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests.
-//
-// See ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := defaultOptions()
- for _, o := range opts {
- o.apply(rtOpts)
- }
-
- // Curry the counter with dynamic labels before checking the remaining labels.
- code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
-
- return func(r *http.Request) (*http.Response, error) {
- resp, err := next.RoundTrip(r)
- if err == nil {
- l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
- for label, resolve := range rtOpts.extraLabelsFromCtx {
- l[label] = resolve(resp.Request.Context())
- }
- addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
- }
- return resp, err
- }
-}
-
-// InstrumentRoundTripperDuration is a middleware that wraps the provided
-// http.RoundTripper to observe the request duration with the provided
-// ObserverVec. The ObserverVec must have zero, one, or two non-const
-// non-curried labels. For those, the only allowed label names are "code" and
-// "method". The function panics otherwise. For the "method" label a predefined
-// default label value set is used to filter given values. Values besides
-// predefined values will count as `unknown` method. `WithExtraMethods`
-// can be used to add more methods to the set. The Observe method of the Observer
-// in the ObserverVec is called with the request duration in
-// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
-// respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped RoundTripper panics or returns a non-nil error, no values are
-// reported.
-//
-// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go1.9+.
-func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := defaultOptions()
- for _, o := range opts {
- o.apply(rtOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
-
- return func(r *http.Request) (*http.Response, error) {
- start := time.Now()
- resp, err := next.RoundTrip(r)
- if err == nil {
- l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
- for label, resolve := range rtOpts.extraLabelsFromCtx {
- l[label] = resolve(resp.Request.Context())
- }
- observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
- }
- return resp, err
- }
-}
-
-// InstrumentTrace is used to offer flexibility in instrumenting the available
-// httptrace.ClientTrace hook functions. Each function is passed a float64
-// representing the time in seconds since the start of the HTTP request. A user
-// may choose to use separately bucketed Histograms, or implement custom
-// instance labels on a per-function basis.
-type InstrumentTrace struct {
- GotConn func(float64)
- PutIdleConn func(float64)
- GotFirstResponseByte func(float64)
- Got100Continue func(float64)
- DNSStart func(float64)
- DNSDone func(float64)
- ConnectStart func(float64)
- ConnectDone func(float64)
- TLSHandshakeStart func(float64)
- TLSHandshakeDone func(float64)
- WroteHeaders func(float64)
- Wait100Continue func(float64)
- WroteRequest func(float64)
-}
-
-// InstrumentRoundTripperTrace is a middleware that wraps the provided
-// RoundTripper and reports times to hook functions provided in the
-// InstrumentTrace struct. Hook functions that are not present in the provided
-// InstrumentTrace struct are ignored. Times reported to the hook functions are
-// time since the start of the request. Only with Go1.9+, those times are
-// guaranteed to never be negative. (Earlier Go versions are not using a
-// monotonic clock.) Note that partitioning of Histograms is expensive and
-// should be used judiciously.
-//
-// For hook functions that receive an error as an argument, no observations are
-// made in the event of a non-nil error value.
-//
-// See ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
- return func(r *http.Request) (*http.Response, error) {
- start := time.Now()
-
- trace := &httptrace.ClientTrace{
- GotConn: func(_ httptrace.GotConnInfo) {
- if it.GotConn != nil {
- it.GotConn(time.Since(start).Seconds())
- }
- },
- PutIdleConn: func(err error) {
- if err != nil {
- return
- }
- if it.PutIdleConn != nil {
- it.PutIdleConn(time.Since(start).Seconds())
- }
- },
- DNSStart: func(_ httptrace.DNSStartInfo) {
- if it.DNSStart != nil {
- it.DNSStart(time.Since(start).Seconds())
- }
- },
- DNSDone: func(_ httptrace.DNSDoneInfo) {
- if it.DNSDone != nil {
- it.DNSDone(time.Since(start).Seconds())
- }
- },
- ConnectStart: func(_, _ string) {
- if it.ConnectStart != nil {
- it.ConnectStart(time.Since(start).Seconds())
- }
- },
- ConnectDone: func(_, _ string, err error) {
- if err != nil {
- return
- }
- if it.ConnectDone != nil {
- it.ConnectDone(time.Since(start).Seconds())
- }
- },
- GotFirstResponseByte: func() {
- if it.GotFirstResponseByte != nil {
- it.GotFirstResponseByte(time.Since(start).Seconds())
- }
- },
- Got100Continue: func() {
- if it.Got100Continue != nil {
- it.Got100Continue(time.Since(start).Seconds())
- }
- },
- TLSHandshakeStart: func() {
- if it.TLSHandshakeStart != nil {
- it.TLSHandshakeStart(time.Since(start).Seconds())
- }
- },
- TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
- if err != nil {
- return
- }
- if it.TLSHandshakeDone != nil {
- it.TLSHandshakeDone(time.Since(start).Seconds())
- }
- },
- WroteHeaders: func() {
- if it.WroteHeaders != nil {
- it.WroteHeaders(time.Since(start).Seconds())
- }
- },
- Wait100Continue: func() {
- if it.Wait100Continue != nil {
- it.Wait100Continue(time.Since(start).Seconds())
- }
- },
- WroteRequest: func(_ httptrace.WroteRequestInfo) {
- if it.WroteRequest != nil {
- it.WroteRequest(time.Since(start).Seconds())
- }
- },
- }
- r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
-
- return next.RoundTrip(r)
- }
-}
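A usage sketch for the client-side middleware above: the wrappers nest, with the duration observer closest to the real transport and the in-flight gauge outermost. Metric names are illustrative.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "In-flight requests of the instrumented client.",
	})
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "API requests by code and method.",
		},
		[]string{"code", "method"},
	)
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "client_request_duration_seconds",
			Help:    "Request latencies.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"}, // Zero, one, or two of "code"/"method" are allowed.
	)
	prometheus.MustRegister(inFlight, counter, duration)

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter,
				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
			),
		),
	}

	resp, err := client.Get("https://example.org")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}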
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
deleted file mode 100644
index 356edb7868..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ /dev/null
@@ -1,576 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "errors"
- "net/http"
- "strconv"
- "strings"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// magicString is used for the hacky label test in checkLabels. Remove once fixed.
-const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
-
-// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
-// which falls back to [prometheus.Observer.Observe] if no labels are provided.
-func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
- if labels == nil {
- obs.Observe(val)
- return
- }
- obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
-}
-
-// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
-// which falls back to [prometheus.Counter.Add] if no labels are provided.
-func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
- if labels == nil {
- obs.Add(val)
- return
- }
- obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
-}
-
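These two wrappers let a single code path serve both plain and exemplar-aware metrics. A runnable sketch of the public API they dispatch to (the exemplar label is illustrative; exemplars only reach clients via the OpenMetrics format, cf. EnableOpenMetrics in http.go):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "op_duration_seconds",
		Help: "Durations with optional trace exemplars.",
	})
	// Without exemplar labels, the wrappers fall back to plain Observe.
	hist.Observe(0.42)
	// With labels, they take the exemplar-aware path, which standard
	// histograms support via the ExemplarObserver interface.
	hist.(prometheus.ExemplarObserver).ObserveWithExemplar(
		0.42, prometheus.Labels{"trace_id": "abc123"},
	)
}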
-// InstrumentHandlerInFlight is a middleware that wraps the provided
-// http.Handler. It sets the provided prometheus.Gauge to the number of
-// requests currently handled by the wrapped http.Handler.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- g.Inc()
- defer g.Dec()
- next.ServeHTTP(w, r)
- })
-}
-
-// InstrumentHandlerDuration is a middleware that wraps the provided
-// http.Handler to observe the request duration with the provided ObserverVec.
-// The ObserverVec must have valid metric and label names and must have zero,
-// one, or two non-const non-curried labels. For those, the only allowed label
-// names are "code" and "method". The function panics otherwise. For the "method"
-// label a predefined default label value set is used to filter given values.
-// Values besides predefined values will count as `unknown` method.
-// `WithExtraMethods` can be used to add more methods to the set. The Observe
-// method of the Observer in the ObserverVec is called with the request duration
-// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
-// the respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go1.9+.
-func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- if code {
- return func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
-
- l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
- }
- }
-
- return func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- next.ServeHTTP(w, r)
- l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
- }
-}
-
-// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
-// to observe the request result with the provided CounterVec. The CounterVec
-// must have valid metric and label names and must have zero, one, or two
-// non-const non-curried labels. For those, the only allowed label names are
-// "code" and "method". The function panics otherwise. For the "method"
-// label a predefined default label value set is used to filter given values.
-// Values besides predefined values will count as `unknown` method.
-// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
-// CounterVec happens by HTTP status code and/or HTTP method if the respective
-// instance label names are present in the CounterVec. For unpartitioned
-// counting, use a CounterVec with zero labels.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, the Counter is not incremented.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the counter with dynamic labels before checking the remaining labels.
- code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- if code {
- return func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
-
- l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
- }
- }
-
- return func(w http.ResponseWriter, r *http.Request) {
- next.ServeHTTP(w, r)
-
- l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
- }
-}
-
-// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
-// http.Handler to observe with the provided ObserverVec the request duration
-// until the response headers are written. The ObserverVec must have valid
-// metric and label names and must have zero, one, or two non-const non-curried
-// labels. For those, the only allowed label names are "code" and "method". The
-// function panics otherwise. For the "method" label a predefined default label
-// value set is used to filter given values. Values besides predefined values
-// will count as `unknown` method. `WithExtraMethods` can be used to add more
-// methods to the set. The Observe method of the Observer in the
-// ObserverVec is called with the request duration in seconds. Partitioning
-// happens by HTTP status code and/or HTTP method if the respective instance
-// label names are present in the ObserverVec. For unpartitioned observations,
-// use an ObserverVec with zero labels. Note that partitioning of Histograms is
-// expensive and should be used judiciously.
-//
-// If the wrapped Handler panics before calling WriteHeader, no value is
-// reported.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go1.9+.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- return func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
- d := newDelegator(w, func(status int) {
- l := labels(code, method, r.Method, status, hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
- })
- next.ServeHTTP(d, r)
- }
-}
-
-// InstrumentHandlerRequestSize is a middleware that wraps the provided
-// http.Handler to observe the request size with the provided ObserverVec. The
-// ObserverVec must have valid metric and label names and must have zero, one,
-// or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. For the "method"
-// label a predefined default label value set is used to filter given values.
-// Values besides predefined values will count as `unknown` method.
-// `WithExtraMethods` can be used to add more methods to the set. The Observe
-// method of the Observer in the ObserverVec is called with the request size in
-// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
-// respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- if code {
- return func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
- size := computeApproximateRequestSize(r)
-
- l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
- }
- }
-
- return func(w http.ResponseWriter, r *http.Request) {
- next.ServeHTTP(w, r)
- size := computeApproximateRequestSize(r)
-
- l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
- }
-}
-
-// InstrumentHandlerResponseSize is a middleware that wraps the provided
-// http.Handler to observe the response size with the provided ObserverVec. The
-// ObserverVec must have valid metric and label names and must have zero, one,
-// or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. For the "method"
-// label a predefined default label value set is used to filter given values.
-// Values besides predefined values will count as `unknown` method.
-// `WithExtraMethods` can be used to add more methods to the set. The Observe
-// method of the Observer in the ObserverVec is called with the response size in
-// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
-// respective instance label names are present in the ObserverVec. For
-// unpartitioned observations, use an ObserverVec with zero labels. Note that
-// partitioning of Histograms is expensive and should be used judiciously.
-//
-// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
-//
-// If the wrapped Handler panics, no values are reported.
-//
-// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
- hOpts := defaultOptions()
- for _, o := range opts {
- o.apply(hOpts)
- }
-
- // Curry the observer with dynamic labels before checking the remaining labels.
- code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- d := newDelegator(w, nil)
- next.ServeHTTP(d, r)
-
- l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
- for label, resolve := range hOpts.extraLabelsFromCtx {
- l[label] = resolve(r.Context())
- }
- observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
- })
-}
-
-// checkLabels returns whether the provided Collector has a non-const,
-// non-curried label named "code" and/or "method". It panics if the provided
-// Collector does not have a Desc or has more than one Desc or its Desc is
-// invalid. It also panics if the Collector has any non-const, non-curried
-// labels that are not named "code" or "method".
-func checkLabels(c prometheus.Collector) (code, method bool) {
- // TODO(beorn7): Remove this hacky way to check for instance labels
- // once Descriptors can have their dimensionality queried.
- var (
- desc *prometheus.Desc
- m prometheus.Metric
- pm dto.Metric
- lvs []string
- )
-
- // Get the Desc from the Collector.
- descc := make(chan *prometheus.Desc, 1)
- c.Describe(descc)
-
- select {
- case desc = <-descc:
- default:
- panic("no description provided by collector")
- }
- select {
- case <-descc:
- panic("more than one description provided by collector")
- default:
- }
-
- close(descc)
-
- // Make sure the Collector has a valid Desc by registering it with a
- // temporary registry.
- prometheus.NewRegistry().MustRegister(c)
-
- // Create a ConstMetric with the Desc. Since we don't know how many
- // variable labels there are, try for as long as it needs.
- for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
- m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
- }
-
- // Write out the metric into a proto message and look at the labels.
- // If the value is not the magicString, it is a constLabel, which doesn't interest us.
- // If the label is curried, it doesn't interest us.
- // In all other cases, only "code" or "method" is allowed.
- if err := m.Write(&pm); err != nil {
- panic("error checking metric for labels")
- }
- for _, label := range pm.Label {
- name, value := label.GetName(), label.GetValue()
- if value != magicString || isLabelCurried(c, name) {
- continue
- }
- switch name {
- case "code":
- code = true
- case "method":
- method = true
- default:
- panic("metric partitioned with non-supported labels")
- }
- }
- return
-}
-
-func isLabelCurried(c prometheus.Collector, label string) bool {
- // This is even hackier than the label test above.
- // We essentially try to curry again and see if it works.
- // But for that, we need to type-convert to the two
- // types we use here, ObserverVec or *CounterVec.
- switch v := c.(type) {
- case *prometheus.CounterVec:
- if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
- return false
- }
- case prometheus.ObserverVec:
- if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
- return false
- }
- default:
- panic("unsupported metric vec type")
- }
- return true
-}
-
-func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
- labels := prometheus.Labels{}
-
- if !(code || method) {
- return labels
- }
-
- if code {
- labels["code"] = sanitizeCode(status)
- }
- if method {
- labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
- }
-
- return labels
-}
-
-func computeApproximateRequestSize(r *http.Request) int {
- s := 0
- if r.URL != nil {
- s += len(r.URL.String())
- }
-
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
- }
- s += len(r.Host)
-
- // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
-
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- return s
-}
-
-// sanitizeMethod returns the lower-cased form of m if m is a known HTTP
-// method. Otherwise, "unknown" is returned. The known method list can be
-// extended as needed by using the extraMethods parameter.
-func sanitizeMethod(m string, extraMethods ...string) string {
- // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
- // the methods chosen as default.
- switch m {
- case "GET", "get":
- return "get"
- case "PUT", "put":
- return "put"
- case "HEAD", "head":
- return "head"
- case "POST", "post":
- return "post"
- case "DELETE", "delete":
- return "delete"
- case "CONNECT", "connect":
- return "connect"
- case "OPTIONS", "options":
- return "options"
- case "NOTIFY", "notify":
- return "notify"
- case "TRACE", "trace":
- return "trace"
- case "PATCH", "patch":
- return "patch"
- default:
- for _, method := range extraMethods {
- if strings.EqualFold(m, method) {
- return strings.ToLower(m)
- }
- }
- return "unknown"
- }
-}
-
-// If the wrapped http.Handler has not set a status code, i.e. the value is
-// currently 0, sanitizeCode will return 200, for consistency with behavior in
-// the stdlib.
-func sanitizeCode(s int) string {
-	// See https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml for accepted codes.
- switch s {
- case 100:
- return "100"
- case 101:
- return "101"
-
- case 200, 0:
- return "200"
- case 201:
- return "201"
- case 202:
- return "202"
- case 203:
- return "203"
- case 204:
- return "204"
- case 205:
- return "205"
- case 206:
- return "206"
-
- case 300:
- return "300"
- case 301:
- return "301"
- case 302:
- return "302"
- case 304:
- return "304"
- case 305:
- return "305"
- case 307:
- return "307"
-
- case 400:
- return "400"
- case 401:
- return "401"
- case 402:
- return "402"
- case 403:
- return "403"
- case 404:
- return "404"
- case 405:
- return "405"
- case 406:
- return "406"
- case 407:
- return "407"
- case 408:
- return "408"
- case 409:
- return "409"
- case 410:
- return "410"
- case 411:
- return "411"
- case 412:
- return "412"
- case 413:
- return "413"
- case 414:
- return "414"
- case 415:
- return "415"
- case 416:
- return "416"
- case 417:
- return "417"
- case 418:
- return "418"
-
- case 500:
- return "500"
- case 501:
- return "501"
- case 502:
- return "502"
- case 503:
- return "503"
- case 504:
- return "504"
- case 505:
- return "505"
-
- case 428:
- return "428"
- case 429:
- return "429"
- case 431:
- return "431"
- case 511:
- return "511"
-
- default:
- if s >= 100 && s <= 599 {
- return strconv.Itoa(s)
- }
- return "unknown"
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
deleted file mode 100644
index 5d4383aa14..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promhttp
-
-import (
- "context"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// Option is used to configure the handler (middleware) or the round tripper.
-type Option interface {
- apply(*options)
-}
-
-// LabelValueFromCtx is used to compute a label value from the request context.
-// The context can be populated with values from the request by middleware.
-type LabelValueFromCtx func(ctx context.Context) string
-
-// options stores the options for both handlers and round trippers.
-type options struct {
- extraMethods []string
- getExemplarFn func(requestCtx context.Context) prometheus.Labels
- extraLabelsFromCtx map[string]LabelValueFromCtx
-}
-
-func defaultOptions() *options {
- return &options{
- getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
- extraLabelsFromCtx: map[string]LabelValueFromCtx{},
- }
-}
-
-func (o *options) emptyDynamicLabels() prometheus.Labels {
- labels := prometheus.Labels{}
-
- for label := range o.extraLabelsFromCtx {
- labels[label] = ""
- }
-
- return labels
-}
-
-type optionApplyFunc func(*options)
-
-func (o optionApplyFunc) apply(opt *options) { o(opt) }
-
-// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
-// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
-//
-// See ExampleInstrumentHandlerWithExtraMethods for example usage.
-func WithExtraMethods(methods ...string) Option {
- return optionApplyFunc(func(o *options) {
- o.extraMethods = methods
- })
-}
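
For context, a minimal sketch (not part of the vendored file) of how this option plugs into an instrumented handler; the metric name, handler body, and the PURGE method are illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	httpRequests := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpRequests)

	// With the option below, PURGE requests are reported as method="purge"
	// instead of being folded into "unknown".
	handler := promhttp.InstrumentHandlerCounter(
		httpRequests,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }),
		promhttp.WithExtraMethods("PURGE"),
	)
	http.ListenAndServe(":8080", handler)
}
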
-
-// WithExemplarFromContext injects a function that extracts an exemplar from the
-// request context; the exemplar is attached to counter and histogram metrics.
-// If the function returns nil labels or the metric does not support exemplars,
-// no exemplar is added (noop), but the metric is still observed/incremented.
-func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
- return optionApplyFunc(func(o *options) {
- o.getExemplarFn = getExemplarFn
- })
-}
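
A hedged sketch of pairing this option with a trace ID carried in the request context; the context key, how it gets populated, and the "trace_id" label are assumptions, not library API:

package instrumentation

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type ctxKey int

const traceIDKey ctxKey = 0 // hypothetical key filled in by tracing middleware

// exemplarOpt attaches the request's trace ID as an exemplar, when present.
var exemplarOpt = promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
	if id, ok := ctx.Value(traceIDKey).(string); ok {
		return prometheus.Labels{"trace_id": id}
	}
	return nil // no exemplar for this request
})

exemplarOpt can then be passed to InstrumentHandlerCounter and friends alongside other Options.
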
-
-// WithLabelFromCtx registers a label for dynamic resolution with access to context.
-// See ExampleInstrumentHandlerWithLabelResolver for example usage.
-func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
- return optionApplyFunc(func(o *options) {
- o.extraLabelsFromCtx[name] = valueFn
- })
-}
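
In the same spirit, a hypothetical sketch that resolves a "tenant" label from the request context (the key and fallback value are illustrative):

package instrumentation

import (
	"context"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type tenantCtxKey int

const tenantKey tenantCtxKey = 0 // hypothetical key set by auth middleware

var tenantOpt = promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
	if t, ok := ctx.Value(tenantKey).(string); ok {
		return t
	}
	return "unknown" // fallback when the context carries no tenant
})

The instrumented metric vector must then be partitioned by a "tenant" label, which the middleware fills per request.
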
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
deleted file mode 100644
index 5e2ced25a0..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ /dev/null
@@ -1,1075 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "bytes"
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "unicode/utf8"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- "github.com/cespare/xxhash/v2"
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/expfmt"
- "google.golang.org/protobuf/proto"
-)
-
-const (
- // Capacity for the channel to collect metrics and descriptors.
- capMetricChan = 1000
- capDescChan = 10
-)
-
-// DefaultRegisterer and DefaultGatherer are the implementations of the
-// Registerer and Gatherer interfaces that a number of convenience functions in
-// this package act on. Initially, both variables point to the same Registry, which
-// has a process collector (currently on Linux only, see NewProcessCollector)
-// and a Go collector (see NewGoCollector, in particular the note about
-// stop-the-world implication with Go versions older than 1.9) already
-// registered. This approach of keeping default instances as global state mirrors
-// the approach of other packages in the Go standard library. Note that there
-// are caveats. Change the variables with caution and only if you understand the
-// consequences. Users who want to avoid global state altogether should not use
-// the convenience functions and act on custom instances instead.
-var (
- defaultRegistry = NewRegistry()
- DefaultRegisterer Registerer = defaultRegistry
- DefaultGatherer Gatherer = defaultRegistry
-)
-
-func init() {
- MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
- MustRegister(NewGoCollector())
-}
-
-// NewRegistry creates a new vanilla Registry without any Collectors
-// pre-registered.
-func NewRegistry() *Registry {
- return &Registry{
- collectorsByID: map[uint64]Collector{},
- descIDs: map[uint64]struct{}{},
- dimHashesByName: map[string]uint64{},
- }
-}
-
-// NewPedanticRegistry returns a registry that checks during collection if each
-// collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry. Unchecked Collectors (those whose
-// Describe method does not yield any descriptors) are excluded from the check.
-//
-// Usually, a Registry will be happy as long as the union of all collected
-// Metrics is consistent and valid even if some metrics are not consistent with
-// their own Desc or a Desc provided by their registered Collector. Well-behaved
-// Collectors and Metrics will only provide consistent Descs. This Registry is
-// useful to test the implementation of Collectors and Metrics.
-func NewPedanticRegistry() *Registry {
- r := NewRegistry()
- r.pedanticChecksEnabled = true
- return r
-}
-
-// Registerer is the interface for the part of a registry in charge of
-// registering and unregistering. Users of custom registries should use
-// Registerer as type for registration purposes (rather than the Registry type
-// directly). In that way, they are free to use a custom Registerer implementation
-// (e.g. for testing purposes).
-type Registerer interface {
- // Register registers a new Collector to be included in metrics
- // collection. It returns an error if the descriptors provided by the
- // Collector are invalid or if they — in combination with descriptors of
- // already registered Collectors — do not fulfill the consistency and
- // uniqueness criteria described in the documentation of metric.Desc.
- //
- // If the provided Collector is equal to a Collector already registered
- // (which includes the case of re-registering the same Collector), the
- // returned error is an instance of AlreadyRegisteredError, which
- // contains the previously registered Collector.
- //
- // A Collector whose Describe method does not yield any Desc is treated
- // as unchecked. Registration will always succeed. No check for
- // re-registering (see previous paragraph) is performed. Thus, the
- // caller is responsible for not double-registering the same unchecked
- // Collector, and for providing a Collector that will not cause
- // inconsistent metrics on collection. (This would lead to scrape
- // errors.)
- Register(Collector) error
- // MustRegister works like Register but registers any number of
- // Collectors and panics upon the first registration that causes an
- // error.
- MustRegister(...Collector)
- // Unregister unregisters the Collector that equals the Collector passed
- // in as an argument. (Two Collectors are considered equal if their
- // Describe method yields the same set of descriptors.) The function
- // returns whether a Collector was unregistered. Note that an unchecked
- // Collector cannot be unregistered (as its Describe method does not
- // yield any descriptor).
- //
- // Note that even after unregistering, it will not be possible to
- // register a new Collector that is inconsistent with the unregistered
- // Collector, e.g. a Collector collecting metrics with the same name but
- // a different help string. The rationale here is that the same registry
- // instance must only collect consistent metrics throughout its
- // lifetime.
- Unregister(Collector) bool
-}
-
-// Gatherer is the interface for the part of a registry in charge of gathering
-// the collected metrics into a number of MetricFamilies. The Gatherer interface
-// comes with the same general implication as described for the Registerer
-// interface.
-type Gatherer interface {
- // Gather calls the Collect method of the registered Collectors and then
- // gathers the collected metrics into a lexicographically sorted slice
- // of uniquely named MetricFamily protobufs. Gather ensures that the
- // returned slice is valid and self-consistent so that it can be used
- // for valid exposition. As an exception to the strict consistency
- // requirements described for metric.Desc, Gather will tolerate
- // different sets of label names for metrics of the same metric family.
- //
- // Even if an error occurs, Gather attempts to gather as many metrics as
- // possible. Hence, if a non-nil error is returned, the returned
- // MetricFamily slice could be nil (in case of a fatal error that
- // prevented any meaningful metric collection) or contain a number of
- // MetricFamily protobufs, some of which might be incomplete, and some
- // might be missing altogether. The returned error (which might be a
- // MultiError) explains the details. Note that this is mostly useful for
- // debugging purposes. If the gathered protobufs are to be used for
- // exposition in actual monitoring, it is almost always better to not
- // expose an incomplete result and instead disregard the returned
- // MetricFamily protobufs in case the returned error is non-nil.
- Gather() ([]*dto.MetricFamily, error)
-}
-
-// Register registers the provided Collector with the DefaultRegisterer.
-//
-// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
-// details.
-func Register(c Collector) error {
- return DefaultRegisterer.Register(c)
-}
-
-// MustRegister registers the provided Collectors with the DefaultRegisterer and
-// panics if any error occurs.
-//
-// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
-// there for more details.
-func MustRegister(cs ...Collector) {
- DefaultRegisterer.MustRegister(cs...)
-}
-
-// Unregister removes the registration of the provided Collector from the
-// DefaultRegisterer.
-//
-// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
-// more details.
-func Unregister(c Collector) bool {
- return DefaultRegisterer.Unregister(c)
-}
-
-// GathererFunc turns a function into a Gatherer.
-type GathererFunc func() ([]*dto.MetricFamily, error)
-
-// Gather implements Gatherer.
-func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
- return gf()
-}
-
-// AlreadyRegisteredError is returned by the Register method if the Collector to
-// be registered has already been registered before, or a different Collector
-// that collects the same metrics has been registered before. Registration fails
-// in that case, but you can detect from the kind of error what has
-// happened. The error contains fields for the existing Collector and the
-// (rejected) new Collector that equals the existing one. This can be used to
-// find out if an equal Collector has been registered before and switch over to
-// using the old one, as demonstrated in the example.
-type AlreadyRegisteredError struct {
- ExistingCollector, NewCollector Collector
-}
-
-func (err AlreadyRegisteredError) Error() string {
- return "duplicate metrics collector registration attempted"
-}
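
A sketch of the switch-over pattern described above; the counter is hypothetical, but the pattern itself follows the doc comment:

package instrumentation

import (
	"errors"

	"github.com/prometheus/client_golang/prometheus"
)

func mustGetCounter() prometheus.Counter {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total", // hypothetical metric
		Help: "Total requests handled.",
	})
	if err := prometheus.Register(c); err != nil {
		var are prometheus.AlreadyRegisteredError
		if errors.As(err, &are) {
			// An equal Collector was registered first; switch over to it.
			return are.ExistingCollector.(prometheus.Counter)
		}
		panic(err)
	}
	return c
}
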
-
-// MultiError is a slice of errors implementing the error interface. It is used
-// by a Gatherer to report multiple errors during MetricFamily gathering.
-type MultiError []error
-
-// Error formats the contained errors as a bullet point list, preceded by the
-// total number of errors. Note that this results in a multi-line string.
-func (errs MultiError) Error() string {
- if len(errs) == 0 {
- return ""
- }
- buf := &bytes.Buffer{}
- fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
- for _, err := range errs {
- fmt.Fprintf(buf, "\n* %s", err)
- }
- return buf.String()
-}
-
-// Append appends the provided error if it is not nil.
-func (errs *MultiError) Append(err error) {
- if err != nil {
- *errs = append(*errs, err)
- }
-}
-
-// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
-// contained error as error if len(errs) is 1. In all other cases, it returns
-// the MultiError directly. This is helpful for returning a MultiError in a way
-// that only uses the MultiError if needed.
-func (errs MultiError) MaybeUnwrap() error {
- switch len(errs) {
- case 0:
- return nil
- case 1:
- return errs[0]
- default:
- return errs
- }
-}
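
The intended call pattern, sketched with hypothetical helpers:

package metrics

import "github.com/prometheus/client_golang/prometheus"

func flushAll(a, b func() error) error {
	var errs prometheus.MultiError
	errs.Append(a()) // nil errors are dropped by Append
	errs.Append(b())
	return errs.MaybeUnwrap() // nil, the single error, or the full MultiError
}
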
-
-// Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
-// and Collector. The zero value is not usable. Create instances with
-// NewRegistry or NewPedanticRegistry.
-//
-// Registry implements Collector to allow it to be used for creating groups of
-// metrics. See the Grouping example for how this can be done.
-type Registry struct {
- mtx sync.RWMutex
- collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
- descIDs map[uint64]struct{}
- dimHashesByName map[string]uint64
- uncheckedCollectors []Collector
- pedanticChecksEnabled bool
-}
-
-// Register implements Registerer.
-func (r *Registry) Register(c Collector) error {
- var (
- descChan = make(chan *Desc, capDescChan)
- newDescIDs = map[uint64]struct{}{}
- newDimHashesByName = map[string]uint64{}
- collectorID uint64 // All desc IDs XOR'd together.
- duplicateDescErr error
- )
- go func() {
- c.Describe(descChan)
- close(descChan)
- }()
- r.mtx.Lock()
- defer func() {
- // Drain channel in case of premature return to not leak a goroutine.
- for range descChan {
- }
- r.mtx.Unlock()
- }()
- // Conduct various tests...
- for desc := range descChan {
-
- // Is the descriptor valid at all?
- if desc.err != nil {
- return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
- }
-
- // Is the descID unique?
- // (In other words: Is the fqName + constLabel combination unique?)
- if _, exists := r.descIDs[desc.id]; exists {
- duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
- }
- // If it is not a duplicate desc in this collector, XOR it to
- // the collectorID. (We allow duplicate descs within the same
- // collector, but their existence must be a no-op.)
- if _, exists := newDescIDs[desc.id]; !exists {
- newDescIDs[desc.id] = struct{}{}
- collectorID ^= desc.id
- }
-
- // Are all the label names and the help string consistent with
- // previous descriptors of the same name?
- // First check existing descriptors...
- if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
- }
- } else {
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- } else {
- newDimHashesByName[desc.fqName] = desc.dimHash
- }
- }
- }
- // A Collector yielding no Desc at all is considered unchecked.
- if len(newDescIDs) == 0 {
- r.uncheckedCollectors = append(r.uncheckedCollectors, c)
- return nil
- }
- if existing, exists := r.collectorsByID[collectorID]; exists {
- switch e := existing.(type) {
- case *wrappingCollector:
- return AlreadyRegisteredError{
- ExistingCollector: e.unwrapRecursively(),
- NewCollector: c,
- }
- default:
- return AlreadyRegisteredError{
- ExistingCollector: e,
- NewCollector: c,
- }
- }
- }
- // If the collectorID is new, but at least one of the descs existed
- // before, we are in trouble.
- if duplicateDescErr != nil {
- return duplicateDescErr
- }
-
- // Only after all tests have passed, actually register.
- r.collectorsByID[collectorID] = c
- for hash := range newDescIDs {
- r.descIDs[hash] = struct{}{}
- }
- for name, dimHash := range newDimHashesByName {
- r.dimHashesByName[name] = dimHash
- }
- return nil
-}
-
-// Unregister implements Registerer.
-func (r *Registry) Unregister(c Collector) bool {
- var (
- descChan = make(chan *Desc, capDescChan)
- descIDs = map[uint64]struct{}{}
- collectorID uint64 // All desc IDs XOR'd together.
- )
- go func() {
- c.Describe(descChan)
- close(descChan)
- }()
- for desc := range descChan {
- if _, exists := descIDs[desc.id]; !exists {
- collectorID ^= desc.id
- descIDs[desc.id] = struct{}{}
- }
- }
-
- r.mtx.RLock()
- if _, exists := r.collectorsByID[collectorID]; !exists {
- r.mtx.RUnlock()
- return false
- }
- r.mtx.RUnlock()
-
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- delete(r.collectorsByID, collectorID)
- for id := range descIDs {
- delete(r.descIDs, id)
- }
- // dimHashesByName is left untouched as those must be consistent
- // throughout the lifetime of a program.
- return true
-}
-
-// MustRegister implements Registerer.
-func (r *Registry) MustRegister(cs ...Collector) {
- for _, c := range cs {
- if err := r.Register(c); err != nil {
- panic(err)
- }
- }
-}
-
-// Gather implements Gatherer.
-func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
- r.mtx.RLock()
-
- if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
- // Fast path.
- r.mtx.RUnlock()
- return nil, nil
- }
-
- var (
- checkedMetricChan = make(chan Metric, capMetricChan)
- uncheckedMetricChan = make(chan Metric, capMetricChan)
- metricHashes = map[uint64]struct{}{}
- wg sync.WaitGroup
- errs MultiError // The collected errors to return in the end.
- registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
- )
-
- goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
- metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
- checkedCollectors := make(chan Collector, len(r.collectorsByID))
- uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
- for _, collector := range r.collectorsByID {
- checkedCollectors <- collector
- }
- for _, collector := range r.uncheckedCollectors {
- uncheckedCollectors <- collector
- }
- // In case pedantic checks are enabled, we have to copy the map before
- // giving up the RLock.
- if r.pedanticChecksEnabled {
- registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
- for id := range r.descIDs {
- registeredDescIDs[id] = struct{}{}
- }
- }
- r.mtx.RUnlock()
-
- wg.Add(goroutineBudget)
-
- collectWorker := func() {
- for {
- select {
- case collector := <-checkedCollectors:
- collector.Collect(checkedMetricChan)
- case collector := <-uncheckedCollectors:
- collector.Collect(uncheckedMetricChan)
- default:
- return
- }
- wg.Done()
- }
- }
-
- // Start the first worker now to make sure at least one is running.
- go collectWorker()
- goroutineBudget--
-
- // Close checkedMetricChan and uncheckedMetricChan once all collectors
- // are collected.
- go func() {
- wg.Wait()
- close(checkedMetricChan)
- close(uncheckedMetricChan)
- }()
-
- // Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
- defer func() {
- if checkedMetricChan != nil {
- for range checkedMetricChan {
- }
- }
- if uncheckedMetricChan != nil {
- for range uncheckedMetricChan {
- }
- }
- }()
-
- // Copy the channel references so we can nil them out later to remove
- // them from the select statements below.
- cmc := checkedMetricChan
- umc := uncheckedMetricChan
-
- for {
- select {
- case metric, ok := <-cmc:
- if !ok {
- cmc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- registeredDescIDs,
- ))
- case metric, ok := <-umc:
- if !ok {
- umc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- nil,
- ))
- default:
- if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
-				// All collectors are already being worked on, or
-				// we have already started as many goroutines as
-				// there are collectors. Do the same as above,
- // just without the default.
- select {
- case metric, ok := <-cmc:
- if !ok {
- cmc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- registeredDescIDs,
- ))
- case metric, ok := <-umc:
- if !ok {
- umc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- nil,
- ))
- }
- break
- }
- // Start more workers.
- go collectWorker()
- goroutineBudget--
- runtime.Gosched()
- }
- // Once both checkedMetricChan and uncheckedMetricChan are closed
- // and drained, the contraption above will nil out cmc and umc,
- // and then we can leave the collect loop here.
- if cmc == nil && umc == nil {
- break
- }
- }
- return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
-
-// Describe implements Collector.
-func (r *Registry) Describe(ch chan<- *Desc) {
- r.mtx.RLock()
- defer r.mtx.RUnlock()
-
- // Only report the checked Collectors; unchecked collectors don't report any
- // Desc.
- for _, c := range r.collectorsByID {
- c.Describe(ch)
- }
-}
-
-// Collect implements Collector.
-func (r *Registry) Collect(ch chan<- Metric) {
- r.mtx.RLock()
- defer r.mtx.RUnlock()
-
- for _, c := range r.collectorsByID {
- c.Collect(ch)
- }
- for _, c := range r.uncheckedCollectors {
- c.Collect(ch)
- }
-}
-
-// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
-// Prometheus text format, and writes it to a temporary file. Upon success, the
-// temporary file is renamed to the provided filename.
-//
-// This is intended for use with the textfile collector of the node exporter.
-// Note that the node exporter expects the filename to be suffixed with ".prom".
-func WriteToTextfile(filename string, g Gatherer) error {
- tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
- if err != nil {
- return err
- }
- defer os.Remove(tmp.Name())
-
- mfs, err := g.Gather()
- if err != nil {
- return err
- }
- for _, mf := range mfs {
- if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
- return err
- }
- }
- if err := tmp.Close(); err != nil {
- return err
- }
-
- if err := os.Chmod(tmp.Name(), 0o644); err != nil {
- return err
- }
- return os.Rename(tmp.Name(), filename)
-}
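
An illustrative call; the target path is an assumption about the local node exporter configuration:

package metrics

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func dumpMetrics() {
	// The node exporter's textfile collector expects the ".prom" suffix.
	if err := prometheus.WriteToTextfile(
		"/var/lib/node_exporter/textfile_collector/myapp.prom",
		prometheus.DefaultGatherer,
	); err != nil {
		log.Fatal(err)
	}
}
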
-
-// processMetric is an internal helper method only used by the Gather method.
-func processMetric(
- metric Metric,
- metricFamiliesByName map[string]*dto.MetricFamily,
- metricHashes map[uint64]struct{},
- registeredDescIDs map[uint64]struct{},
-) error {
- desc := metric.Desc()
- // Wrapped metrics collected by an unchecked Collector can have an
- // invalid Desc.
- if desc.err != nil {
- return desc.err
- }
- dtoMetric := &dto.Metric{}
- if err := metric.Write(dtoMetric); err != nil {
- return fmt.Errorf("error collecting metric %v: %w", desc, err)
- }
- metricFamily, ok := metricFamiliesByName[desc.fqName]
- if ok { // Existing name.
- if metricFamily.GetHelp() != desc.help {
- return fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
- )
- }
- // TODO(beorn7): Simplify switch once Desc has type.
- switch metricFamily.GetType() {
- case dto.MetricType_COUNTER:
- if dtoMetric.Counter == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Counter",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_GAUGE:
- if dtoMetric.Gauge == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Gauge",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_SUMMARY:
- if dtoMetric.Summary == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Summary",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_UNTYPED:
- if dtoMetric.Untyped == nil {
- return fmt.Errorf(
- "collected metric %s %s should be Untyped",
- desc.fqName, dtoMetric,
- )
- }
- case dto.MetricType_HISTOGRAM:
- if dtoMetric.Histogram == nil {
- return fmt.Errorf(
- "collected metric %s %s should be a Histogram",
- desc.fqName, dtoMetric,
- )
- }
- default:
- panic("encountered MetricFamily with invalid type")
- }
- } else { // New name.
- metricFamily = &dto.MetricFamily{}
- metricFamily.Name = proto.String(desc.fqName)
- metricFamily.Help = proto.String(desc.help)
- // TODO(beorn7): Simplify switch once Desc has type.
- switch {
- case dtoMetric.Gauge != nil:
- metricFamily.Type = dto.MetricType_GAUGE.Enum()
- case dtoMetric.Counter != nil:
- metricFamily.Type = dto.MetricType_COUNTER.Enum()
- case dtoMetric.Summary != nil:
- metricFamily.Type = dto.MetricType_SUMMARY.Enum()
- case dtoMetric.Untyped != nil:
- metricFamily.Type = dto.MetricType_UNTYPED.Enum()
- case dtoMetric.Histogram != nil:
- metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
- default:
- return fmt.Errorf("empty metric collected: %s", dtoMetric)
- }
- if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
- return err
- }
- metricFamiliesByName[desc.fqName] = metricFamily
- }
- if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
- return err
- }
- if registeredDescIDs != nil {
- // Is the desc registered at all?
- if _, exist := registeredDescIDs[desc.id]; !exist {
- return fmt.Errorf(
- "collected metric %s %s with unregistered descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
- return err
- }
- }
- metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
- return nil
-}
-
-// Gatherers is a slice of Gatherer instances that implements the Gatherer
-// interface itself. Its Gather method calls Gather on all Gatherers in the
-// slice in order and returns the merged results. Errors returned from the
-// Gather calls are all returned in a flattened MultiError. Duplicate and
-// inconsistent Metrics are skipped (first occurrence in slice order wins) and
-// reported in the returned error.
-//
-// Gatherers can be used to merge the Gather results from multiple
-// Registries. It also provides a way to directly inject existing MetricFamily
-// protobufs into the gathering by creating a custom Gatherer with a Gather
-// method that simply returns the existing MetricFamily protobufs. Note that no
-// registration is involved (in contrast to Collector registration), so
-// obviously registration-time checks cannot happen. Any inconsistencies between
-// the gathered MetricFamilies are reported as errors by the Gather method, and
-// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
-// (e.g. syntactically invalid metric or label names) will go undetected.
-type Gatherers []Gatherer
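
A brief sketch of merging the default gatherer with a custom registry; the names are illustrative:

package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func mergedGather(customReg *prometheus.Registry) ([]*dto.MetricFamily, error) {
	g := prometheus.Gatherers{prometheus.DefaultGatherer, customReg}
	// Duplicate or inconsistent metrics are skipped and reported via the error.
	return g.Gather()
}
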
-
-// Gather implements Gatherer.
-func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
- var (
- metricFamiliesByName = map[string]*dto.MetricFamily{}
- metricHashes = map[uint64]struct{}{}
- errs MultiError // The collected errors to return in the end.
- )
-
- for i, g := range gs {
- mfs, err := g.Gather()
- if err != nil {
- multiErr := MultiError{}
- if errors.As(err, &multiErr) {
- for _, err := range multiErr {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
- }
- } else {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
- }
- }
- for _, mf := range mfs {
- existingMF, exists := metricFamiliesByName[mf.GetName()]
- if exists {
- if existingMF.GetHelp() != mf.GetHelp() {
- errs = append(errs, fmt.Errorf(
- "gathered metric family %s has help %q but should have %q",
- mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
- ))
- continue
- }
- if existingMF.GetType() != mf.GetType() {
- errs = append(errs, fmt.Errorf(
- "gathered metric family %s has type %s but should have %s",
- mf.GetName(), mf.GetType(), existingMF.GetType(),
- ))
- continue
- }
- } else {
- existingMF = &dto.MetricFamily{}
- existingMF.Name = mf.Name
- existingMF.Help = mf.Help
- existingMF.Type = mf.Type
- if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
- errs = append(errs, err)
- continue
- }
- metricFamiliesByName[mf.GetName()] = existingMF
- }
- for _, m := range mf.Metric {
- if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
- errs = append(errs, err)
- continue
- }
- existingMF.Metric = append(existingMF.Metric, m)
- }
- }
- }
- return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
-
-// checkSuffixCollisions checks for collisions with the “magic” suffixes the
-// Prometheus text format and the internal metric representation of the
-// Prometheus server add while flattening Summaries and Histograms.
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
- var (
- newName = mf.GetName()
- newType = mf.GetType()
- newNameWithoutSuffix = ""
- )
- switch {
- case strings.HasSuffix(newName, "_count"):
- newNameWithoutSuffix = newName[:len(newName)-6]
- case strings.HasSuffix(newName, "_sum"):
- newNameWithoutSuffix = newName[:len(newName)-4]
- case strings.HasSuffix(newName, "_bucket"):
- newNameWithoutSuffix = newName[:len(newName)-7]
- }
- if newNameWithoutSuffix != "" {
- if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
- switch existingMF.GetType() {
- case dto.MetricType_SUMMARY:
- if !strings.HasSuffix(newName, "_bucket") {
- return fmt.Errorf(
- "collected metric named %q collides with previously collected summary named %q",
- newName, newNameWithoutSuffix,
- )
- }
- case dto.MetricType_HISTOGRAM:
- return fmt.Errorf(
- "collected metric named %q collides with previously collected histogram named %q",
- newName, newNameWithoutSuffix,
- )
- }
- }
- }
- if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
- if _, ok := mfs[newName+"_count"]; ok {
- return fmt.Errorf(
- "collected histogram or summary named %q collides with previously collected metric named %q",
- newName, newName+"_count",
- )
- }
- if _, ok := mfs[newName+"_sum"]; ok {
- return fmt.Errorf(
- "collected histogram or summary named %q collides with previously collected metric named %q",
- newName, newName+"_sum",
- )
- }
- }
- if newType == dto.MetricType_HISTOGRAM {
- if _, ok := mfs[newName+"_bucket"]; ok {
- return fmt.Errorf(
- "collected histogram named %q collides with previously collected metric named %q",
- newName, newName+"_bucket",
- )
- }
- }
- return nil
-}
-
-// checkMetricConsistency checks if the provided Metric is consistent with the
-// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
-// name. If the resulting hash is already in the provided metricHashes, an error
-// is returned. If not, it is added to metricHashes.
-func checkMetricConsistency(
- metricFamily *dto.MetricFamily,
- dtoMetric *dto.Metric,
- metricHashes map[uint64]struct{},
-) error {
- name := metricFamily.GetName()
-
- // Type consistency with metric family.
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
- metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
- metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
- metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
- metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
- return fmt.Errorf(
- "collected metric %q { %s} is not a %s",
- name, dtoMetric, metricFamily.GetType(),
- )
- }
-
- previousLabelName := ""
- for _, labelPair := range dtoMetric.GetLabel() {
- labelName := labelPair.GetName()
- if labelName == previousLabelName {
- return fmt.Errorf(
- "collected metric %q { %s} has two or more labels with the same name: %s",
- name, dtoMetric, labelName,
- )
- }
- if !checkLabelName(labelName) {
- return fmt.Errorf(
- "collected metric %q { %s} has a label with an invalid name: %s",
- name, dtoMetric, labelName,
- )
- }
- if dtoMetric.Summary != nil && labelName == quantileLabel {
- return fmt.Errorf(
- "collected metric %q { %s} must not have an explicit %q label",
- name, dtoMetric, quantileLabel,
- )
- }
- if !utf8.ValidString(labelPair.GetValue()) {
- return fmt.Errorf(
- "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
- name, dtoMetric, labelName, labelPair.GetValue())
- }
- previousLabelName = labelName
- }
-
- // Is the metric unique (i.e. no other metric with the same name and the same labels)?
- h := xxhash.New()
- h.WriteString(name)
- h.Write(separatorByteSlice)
- // Make sure label pairs are sorted. We depend on it for the consistency
- // check.
- if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
- // We cannot sort dtoMetric.Label in place as it is immutable by contract.
- copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
- copy(copiedLabels, dtoMetric.Label)
- sort.Sort(internal.LabelPairSorter(copiedLabels))
- dtoMetric.Label = copiedLabels
- }
- for _, lp := range dtoMetric.Label {
- h.WriteString(lp.GetName())
- h.Write(separatorByteSlice)
- h.WriteString(lp.GetValue())
- h.Write(separatorByteSlice)
- }
- if dtoMetric.TimestampMs != nil {
- h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
- h.Write(separatorByteSlice)
- }
- hSum := h.Sum64()
- if _, exists := metricHashes[hSum]; exists {
- return fmt.Errorf(
- "collected metric %q { %s} was collected before with the same name and label values",
- name, dtoMetric,
- )
- }
- metricHashes[hSum] = struct{}{}
- return nil
-}
-
-func checkDescConsistency(
- metricFamily *dto.MetricFamily,
- dtoMetric *dto.Metric,
- desc *Desc,
-) error {
- // Desc help consistency with metric family help.
- if metricFamily.GetHelp() != desc.help {
- return fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
- )
- }
-
- // Is the desc consistent with the content of the metric?
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
- copy(lpsFromDesc, desc.constLabelPairs)
- for _, l := range desc.variableLabels.names {
- lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
- Name: proto.String(l),
- })
- }
- if len(lpsFromDesc) != len(dtoMetric.Label) {
- return fmt.Errorf(
- "labels in collected metric %s %s are inconsistent with descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- sort.Sort(internal.LabelPairSorter(lpsFromDesc))
- for i, lpFromDesc := range lpsFromDesc {
- lpFromMetric := dtoMetric.Label[i]
- if lpFromDesc.GetName() != lpFromMetric.GetName() ||
- lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
- return fmt.Errorf(
- "labels in collected metric %s %s are inconsistent with descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- }
- return nil
-}
-
-var _ TransactionalGatherer = &MultiTRegistry{}
-
-// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
-// transactional gatherers.
-//
-// It is the caller's responsibility to ensure that the gatherers have mutually
-// exclusive metric families; no deduplication will happen.
-type MultiTRegistry struct {
- tGatherers []TransactionalGatherer
-}
-
-// NewMultiTRegistry creates MultiTRegistry.
-func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
- return &MultiTRegistry{
- tGatherers: tGatherers,
- }
-}
-
-// Gather implements TransactionalGatherer interface.
-func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
- errs := MultiError{}
-
- dFns := make([]func(), 0, len(r.tGatherers))
- // TODO(bwplotka): Implement concurrency for those?
- for _, g := range r.tGatherers {
- // TODO(bwplotka): Check for duplicates?
- m, d, err := g.Gather()
- errs.Append(err)
-
- mfs = append(mfs, m...)
- dFns = append(dFns, d)
- }
-
-	// TODO(bwplotka): Consider sorting in place, given that each gatherer's metric families are already sorted.
- sort.Slice(mfs, func(i, j int) bool {
- return *mfs[i].Name < *mfs[j].Name
- })
- return mfs, func() {
- for _, d := range dFns {
- d()
- }
- }, errs.MaybeUnwrap()
-}
-
-// TransactionalGatherer represents a transactional gatherer that can be
-// notified when the memory used by the returned metric families is no longer
-// needed by the caller. This allows caching implementations.
-type TransactionalGatherer interface {
- // Gather returns metrics in a lexicographically sorted slice
- // of uniquely named MetricFamily protobufs. Gather ensures that the
- // returned slice is valid and self-consistent so that it can be used
- // for valid exposition. As an exception to the strict consistency
- // requirements described for metric.Desc, Gather will tolerate
- // different sets of label names for metrics of the same metric family.
- //
- // Even if an error occurs, Gather attempts to gather as many metrics as
- // possible. Hence, if a non-nil error is returned, the returned
- // MetricFamily slice could be nil (in case of a fatal error that
- // prevented any meaningful metric collection) or contain a number of
- // MetricFamily protobufs, some of which might be incomplete, and some
- // might be missing altogether. The returned error (which might be a
- // MultiError) explains the details. Note that this is mostly useful for
- // debugging purposes. If the gathered protobufs are to be used for
- // exposition in actual monitoring, it is almost always better to not
- // expose an incomplete result and instead disregard the returned
- // MetricFamily protobufs in case the returned error is non-nil.
- //
-	// Important: done is expected to be called (even if an error occurs!)
-	// once the caller no longer needs the returned slice of dto.MetricFamily.
- Gather() (_ []*dto.MetricFamily, done func(), err error)
-}
-
-// ToTransactionalGatherer transforms a Gatherer into a TransactionalGatherer
-// with a no-op done function.
-func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
- return &noTransactionGatherer{g: g}
-}
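
A sketch combining both helpers above; cached stands in for a hypothetical caching TransactionalGatherer:

package metrics

import "github.com/prometheus/client_golang/prometheus"

func gatherJoined(reg *prometheus.Registry, cached prometheus.TransactionalGatherer) error {
	multi := prometheus.NewMultiTRegistry(
		prometheus.ToTransactionalGatherer(reg),
		cached,
	)
	mfs, done, err := multi.Gather()
	// Per the TransactionalGatherer contract, call done even when err != nil.
	defer done()
	_ = mfs // e.g. encode for exposition before done runs
	return err
}
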
-
-type noTransactionGatherer struct {
- g Gatherer
-}
-
-// Gather implements TransactionalGatherer interface.
-func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
- mfs, err := g.g.Gather()
- return mfs, func() {}, err
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
deleted file mode 100644
index 1462704446..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ /dev/null
@@ -1,785 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "runtime"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/beorn7/perks/quantile"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// quantileLabel is used for the label that defines the quantile in a
-// summary.
-const quantileLabel = "quantile"
-
-// A Summary captures individual observations from an event or sample stream and
-// summarizes them in a manner similar to traditional summary statistics: 1. sum
-// of observations, 2. observation count, 3. rank estimations.
-//
-// A typical use-case is the observation of request latencies. By default, a
-// Summary provides the median, the 90th and the 99th percentile of the latency
-// as rank estimations. However, the default behavior will change in the
-// upcoming v1.0.0 of the library. There will be no rank estimations at all by
-// default. For a sane transition, it is recommended to set the desired rank
-// estimations explicitly.
-//
-// Note that the rank estimations cannot be aggregated in a meaningful way with
-// the Prometheus query language (i.e. you cannot average or add them). If you
-// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
-// queries served across all instances of a service), consider the Histogram
-// metric type. See the Prometheus documentation for more details.
-//
-// To create Summary instances, use NewSummary.
-type Summary interface {
- Metric
- Collector
-
- // Observe adds a single observation to the summary. Observations are
- // usually positive or zero. Negative observations are accepted but
- // prevent current versions of Prometheus from properly detecting
- // counter resets in the sum of observations. See
- // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
- // for details.
- Observe(float64)
-}
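
As recommended above, a sketch with explicitly chosen objectives; the metric name and the exact quantile/error pairs are illustrative:

package metrics

import "github.com/prometheus/client_golang/prometheus"

func newLatencySummary() prometheus.Summary {
	return prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "myapp_request_duration_seconds", // hypothetical
		Help: "Request latency distribution.",
		Objectives: map[float64]float64{
			0.5:  0.05,  // reported value lies between the 0.45 and 0.55 quantile
			0.9:  0.01,  // between the 0.89 and 0.91 quantile
			0.99: 0.001, // between the 0.989 and 0.991 quantile
		},
	})
}
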
-
-var errQuantileLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in summaries", quantileLabel,
-)
-
-// Default values for SummaryOpts.
-const (
- // DefMaxAge is the default duration for which observations stay
- // relevant.
- DefMaxAge time.Duration = 10 * time.Minute
- // DefAgeBuckets is the default number of buckets used to calculate the
- // age of observations.
- DefAgeBuckets = 5
- // DefBufCap is the standard buffer size for collecting Summary observations.
- DefBufCap = 500
-)
-
-// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name to a non-empty string. While all other fields are
-// optional and can safely be left at their zero value, it is recommended to set
-// a help string and to explicitly set the Objectives field to the desired value
-// as the default value will change in the upcoming v1.0.0 of the library.
-type SummaryOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Summary (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the Summary must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Summary.
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // Due to the way a Summary is represented in the Prometheus text format
- // and how it is handled by the Prometheus server internally, “quantile”
- // is an illegal label name. Construction of a Summary or SummaryVec
- // will panic if this label name is used in ConstLabels.
- //
- // ConstLabels are only used rarely. In particular, do not use them to
- // attach the same labels to all your metrics. Those use cases are
- // better covered by target labels set by the scraping Prometheus
- // server, or by one specific metric (e.g. a build_info or a
- // machine_role metric). See also
- // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
- ConstLabels Labels
-
- // Objectives defines the quantile rank estimates with their respective
- // absolute error. If Objectives[q] = e, then the value reported for q
- // will be the φ-quantile value for some φ between q-e and q+e. The
- // default value is an empty map, resulting in a summary without
- // quantiles.
- Objectives map[float64]float64
-
- // MaxAge defines the duration for which an observation stays relevant
- // for the summary. Only applies to pre-calculated quantiles, does not
- // apply to _sum and _count. Must be positive. The default value is
- // DefMaxAge.
- MaxAge time.Duration
-
- // AgeBuckets is the number of buckets used to exclude observations that
- // are older than MaxAge from the summary. A higher number has a
- // resource penalty, so only increase it if the higher resolution is
- // really required. For very high observation rates, you might want to
- // reduce the number of age buckets. With only one age bucket, you will
- // effectively see a complete reset of the summary each time MaxAge has
- // passed. The default value is DefAgeBuckets.
- AgeBuckets uint32
-
- // BufCap defines the default sample stream buffer size. The default
- // value of DefBufCap should suffice for most uses. If there is a need
- // to increase the value, a multiple of 500 is recommended (because that
- // is the internal buffer size of the underlying package
- // "github.com/bmizerany/perks/quantile").
- BufCap uint32
-
- // now is for testing purposes, by default it's time.Now.
- now func() time.Time
-}
-
-// SummaryVecOpts bundles the options to create a SummaryVec metric.
-// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
-// is optional and can safely be left to its default value.
-type SummaryVecOpts struct {
- SummaryOpts
-
- // VariableLabels are used to partition the metric vector by the given set
- // of labels. Each label value will be constrained with the optional Constraint
- // function, if provided.
- VariableLabels ConstrainableLabels
-}
-
-// Problem with the sliding-window decay algorithm... The Merge method of
-// perks/quantile is actually not working as advertised - and it might be
-// unfixable, as the underlying algorithm is apparently not capable of merging
-// summaries in the first place. To avoid using Merge, we are currently adding
-// observations to _each_ age bucket, i.e. the effort to add a sample is
-// essentially multiplied by the number of age buckets. When rotating age
-// buckets, we empty the previous head stream. On scrape time, we simply take
-// the quantiles from the head stream (no merging required). Result: More effort
-// on observation time, less effort on scrape time, which is exactly the
-// opposite of what we try to accomplish, but at least the results are correct.
-//
-// The quite elegant previous contraption to merge the age buckets efficiently
-// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
-// can't be used anymore.
-
-// NewSummary creates a new Summary based on the provided SummaryOpts.
-func NewSummary(opts SummaryOpts) Summary {
- return newSummary(
- NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ),
- opts,
- )
-}
-
-func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
- if len(desc.variableLabels.names) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
- }
-
- for _, n := range desc.variableLabels.names {
- if n == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
- for _, lp := range desc.constLabelPairs {
- if lp.GetName() == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
-
- if opts.Objectives == nil {
- opts.Objectives = map[float64]float64{}
- }
-
- if opts.MaxAge < 0 {
- panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
- }
- if opts.MaxAge == 0 {
- opts.MaxAge = DefMaxAge
- }
-
- if opts.AgeBuckets == 0 {
- opts.AgeBuckets = DefAgeBuckets
- }
-
- if opts.BufCap == 0 {
- opts.BufCap = DefBufCap
- }
-
- if opts.now == nil {
- opts.now = time.Now
- }
- if len(opts.Objectives) == 0 {
- // Use the lock-free implementation of a Summary without objectives.
- s := &noObjectivesSummary{
- desc: desc,
- labelPairs: MakeLabelPairs(desc, labelValues),
- counts: [2]*summaryCounts{{}, {}},
- }
- s.init(s) // Init self-collection.
- s.createdTs = timestamppb.New(opts.now())
- return s
- }
-
- s := &summary{
- desc: desc,
-
- objectives: opts.Objectives,
- sortedObjectives: make([]float64, 0, len(opts.Objectives)),
-
- labelPairs: MakeLabelPairs(desc, labelValues),
-
- hotBuf: make([]float64, 0, opts.BufCap),
- coldBuf: make([]float64, 0, opts.BufCap),
- streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
- }
- s.headStreamExpTime = opts.now().Add(s.streamDuration)
- s.hotBufExpTime = s.headStreamExpTime
-
- for i := uint32(0); i < opts.AgeBuckets; i++ {
- s.streams = append(s.streams, s.newStream())
- }
- s.headStream = s.streams[0]
-
- for qu := range s.objectives {
- s.sortedObjectives = append(s.sortedObjectives, qu)
- }
- sort.Float64s(s.sortedObjectives)
-
- s.init(s) // Init self-collection.
- s.createdTs = timestamppb.New(opts.now())
- return s
-}
-
-type summary struct {
- selfCollector
-
- bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
- mtx sync.Mutex // Protects every other moving part.
- // Lock bufMtx before mtx if both are needed.
-
- desc *Desc
-
- objectives map[float64]float64
- sortedObjectives []float64
-
- labelPairs []*dto.LabelPair
-
- sum float64
- cnt uint64
-
- hotBuf, coldBuf []float64
-
- streams []*quantile.Stream
- streamDuration time.Duration
- headStream *quantile.Stream
- headStreamIdx int
- headStreamExpTime, hotBufExpTime time.Time
-
- createdTs *timestamppb.Timestamp
-}
-
-func (s *summary) Desc() *Desc {
- return s.desc
-}
-
-func (s *summary) Observe(v float64) {
- s.bufMtx.Lock()
- defer s.bufMtx.Unlock()
-
- now := time.Now()
- if now.After(s.hotBufExpTime) {
- s.asyncFlush(now)
- }
- s.hotBuf = append(s.hotBuf, v)
- if len(s.hotBuf) == cap(s.hotBuf) {
- s.asyncFlush(now)
- }
-}
-
-func (s *summary) Write(out *dto.Metric) error {
- sum := &dto.Summary{
- CreatedTimestamp: s.createdTs,
- }
- qs := make([]*dto.Quantile, 0, len(s.objectives))
-
- s.bufMtx.Lock()
- s.mtx.Lock()
- // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
- s.swapBufs(time.Now())
- s.bufMtx.Unlock()
-
- s.flushColdBuf()
- sum.SampleCount = proto.Uint64(s.cnt)
- sum.SampleSum = proto.Float64(s.sum)
-
- for _, rank := range s.sortedObjectives {
- var q float64
- if s.headStream.Count() == 0 {
- q = math.NaN()
- } else {
- q = s.headStream.Query(rank)
- }
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- s.mtx.Unlock()
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
- return nil
-}
-
-func (s *summary) newStream() *quantile.Stream {
- return quantile.NewTargeted(s.objectives)
-}
-
-// asyncFlush needs bufMtx locked.
-func (s *summary) asyncFlush(now time.Time) {
- s.mtx.Lock()
- s.swapBufs(now)
-
- // Unblock the original goroutine that was responsible for the mutation
- // that triggered the compaction. But hold onto the global non-buffer
- // state mutex until the operation finishes.
- go func() {
- s.flushColdBuf()
- s.mtx.Unlock()
- }()
-}
-
-// maybeRotateStreams needs mtx AND bufMtx locked.
-func (s *summary) maybeRotateStreams() {
- for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
- s.headStream.Reset()
- s.headStreamIdx++
- if s.headStreamIdx >= len(s.streams) {
- s.headStreamIdx = 0
- }
- s.headStream = s.streams[s.headStreamIdx]
- s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
- }
-}
-
-// flushColdBuf needs mtx locked.
-func (s *summary) flushColdBuf() {
- for _, v := range s.coldBuf {
- for _, stream := range s.streams {
- stream.Insert(v)
- }
- s.cnt++
- s.sum += v
- }
- s.coldBuf = s.coldBuf[0:0]
- s.maybeRotateStreams()
-}
-
-// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
-func (s *summary) swapBufs(now time.Time) {
- if len(s.coldBuf) != 0 {
- panic("coldBuf is not empty")
- }
- s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
- // hotBuf is now empty and gets new expiration set.
- for now.After(s.hotBufExpTime) {
- s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
- }
-}
-
-type summaryCounts struct {
- // sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- sumBits uint64
- count uint64
-}
-
-type noObjectivesSummary struct {
- // countAndHotIdx enables lock-free writes with use of atomic updates.
- // The most significant bit is the hot index [0 or 1] of the count field
- // below. Observe calls update the hot one. All remaining bits count the
- // number of Observe calls. Observe starts by incrementing this counter,
-	// and finishes by incrementing the count field in the respective
- // summaryCounts, as a marker for completion.
- //
-	// Calls of the Write method (which are non-mutating reads from the
-	// perspective of the summary) swap the hot and cold counts under the
-	// writeMtx lock. A cooldown is awaited (while locked) by comparing the
-	// number of observations with the initiation count. Once they match, the
-	// last observation on the now-cool counts has completed. All cold fields
-	// must be merged into the new hot counts before releasing writeMtx.
-
- // Fields with atomic access first! See alignment constraint:
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- countAndHotIdx uint64
-
- selfCollector
- desc *Desc
- writeMtx sync.Mutex // Only used in the Write method.
-
- // Two counts, one is "hot" for lock-free observations, the other is
- // "cold" for writing out a dto.Metric. It has to be an array of
-	// pointers to guarantee 64bit alignment of the summaryCounts, see
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- counts [2]*summaryCounts
-
- labelPairs []*dto.LabelPair
-
- createdTs *timestamppb.Timestamp
-}
-
-func (s *noObjectivesSummary) Desc() *Desc {
- return s.desc
-}
-
-func (s *noObjectivesSummary) Observe(v float64) {
- // We increment h.countAndHotIdx so that the counter in the lower
- // 63 bits gets incremented. At the same time, we get the new value
- // back, which we can use to find the currently-hot counts.
- n := atomic.AddUint64(&s.countAndHotIdx, 1)
- hotCounts := s.counts[n>>63]
-
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- break
- }
- }
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hotCounts.count, 1)
-}
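-
-// The CAS loop in Observe above is the standard pattern for atomically adding
-// to a float64 stored as uint64 bits. As a standalone sketch (the helper name
-// is illustrative only, not part of this package):
-//
-//	func atomicAddFloat64(bits *uint64, v float64) {
-//		for {
-//			oldBits := atomic.LoadUint64(bits)
-//			newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-//			if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
-//				return
-//			}
-//		}
-//	}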
-
-func (s *noObjectivesSummary) Write(out *dto.Metric) error {
- // For simplicity, we protect this whole method by a mutex. It is not in
- // the hot path, i.e. Observe is called much more often than Write. The
- // complication of making Write lock-free isn't worth it, if possible at
- // all.
- s.writeMtx.Lock()
- defer s.writeMtx.Unlock()
-
- // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
- // without touching the count bits. See the struct comments for a full
- // description of the algorithm.
- n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
- // count is contained unchanged in the lower 63 bits.
- count := n & ((1 << 63) - 1)
- // The most significant bit tells us which counts is hot. The complement
- // is thus the cold one.
- hotCounts := s.counts[n>>63]
- coldCounts := s.counts[(^n)>>63]
-
- // Await cooldown.
- for count != atomic.LoadUint64(&coldCounts.count) {
- runtime.Gosched() // Let observations get work done.
- }
-
- sum := &dto.Summary{
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
- CreatedTimestamp: s.createdTs,
- }
-
- out.Summary = sum
- out.Label = s.labelPairs
-
- // Finally add all the cold counts to the new hot counts and reset the cold counts.
- atomic.AddUint64(&hotCounts.count, count)
- atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
- }
- }
- return nil
-}
-
-type quantSort []*dto.Quantile
-
-func (s quantSort) Len() int {
- return len(s)
-}
-
-func (s quantSort) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s quantSort) Less(i, j int) bool {
- return s[i].GetQuantile() < s[j].GetQuantile()
-}
-
-// SummaryVec is a Collector that bundles a set of Summaries that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewSummaryVec.
-type SummaryVec struct {
- *MetricVec
-}
-
-// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names.
-//
-// Due to the way a Summary is represented in the Prometheus text format and how
-// it is handled by the Prometheus server internally, “quantile” is an illegal
-// label name. NewSummaryVec will panic if this label name is used.
-func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- return V2.NewSummaryVec(SummaryVecOpts{
- SummaryOpts: opts,
- VariableLabels: UnconstrainedLabels(labelNames),
- })
-}
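-
-// For illustration, a typical construction and use (the metric name, help
-// text, objectives, and labels here are hypothetical):
-//
-//	latency := prometheus.NewSummaryVec(prometheus.SummaryOpts{
-//		Name:       "http_request_duration_seconds",
-//		Help:       "HTTP request latency.",
-//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-//	}, []string{"method", "code"})
-//	latency.WithLabelValues("GET", "200").Observe(0.042)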
-
-// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
-func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
- for _, ln := range opts.VariableLabels.labelNames() {
- if ln == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
- desc := V2.NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- opts.VariableLabels,
- opts.ConstLabels,
- )
- return &SummaryVec{
- MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newSummary(desc, opts.SummaryOpts, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues returns the Summary for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Summary is created.
-//
-// It is possible to call this method without using the returned Summary to only
-// create the new Summary but leave it at its starting value, a Summary without
-// any observations.
-//
-// Keeping the Summary for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Summary from the SummaryVec. In that case,
-// the Summary will still exist, but it will not be exported anymore, even if a
-// Summary with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
- metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Summary for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Summary is created. Implications of
-// creating a Summary without using it and keeping the Summary for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
- metric, err := v.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-//
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
- s, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return s
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-//
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *SummaryVec) With(labels Labels) Observer {
- s, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return s
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the SummaryVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
- vec, err := v.MetricVec.CurryWith(labels)
- if vec != nil {
- return &SummaryVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
-func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
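-
-// A short currying sketch, continuing the hypothetical SummaryVec from the
-// NewSummaryVec comment above:
-//
-//	getOnly := latency.MustCurryWith(prometheus.Labels{"method": "GET"})
-//	getOnly.WithLabelValues("200").Observe(0.042) // only "code" remains variable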
-
-type constSummary struct {
- desc *Desc
- count uint64
- sum float64
- quantiles map[float64]float64
- labelPairs []*dto.LabelPair
- createdTs *timestamppb.Timestamp
-}
-
-func (s *constSummary) Desc() *Desc {
- return s.desc
-}
-
-func (s *constSummary) Write(out *dto.Metric) error {
- sum := &dto.Summary{
- CreatedTimestamp: s.createdTs,
- }
- qs := make([]*dto.Quantile, 0, len(s.quantiles))
-
- sum.SampleCount = proto.Uint64(s.count)
- sum.SampleSum = proto.Float64(s.sum)
-
- for rank, q := range s.quantiles {
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
-
- return nil
-}
-
-// NewConstSummary returns a metric representing a Prometheus summary with fixed
-// values for the count, sum, and quantiles. As those parameters cannot be
-// changed, the returned value does not implement the Summary interface (but
-// only the Metric interface). Users of this package will not have much use for
-// it in regular operations. However, when implementing custom Collectors, it is
-// useful as a throw-away metric that is generated on the fly to send it to
-// Prometheus in the Collect method.
-//
-// quantiles maps ranks to quantile values. For example, a median latency of
-// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-//
-// map[float64]float64{0.5: 0.23, 0.99: 0.56}
-//
-// NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc or if Desc is invalid.
-func NewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
- return &constSummary{
- desc: desc,
- count: count,
- sum: sum,
- quantiles: quantiles,
- labelPairs: MakeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstSummary is a version of NewConstSummary that panics where
-// NewConstSummary would have returned an error.
-func MustNewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) Metric {
- m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
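-
-// As a sketch, a custom Collector might emit a const summary on the fly in its
-// Collect method (the desc, numbers, and label value are hypothetical):
-//
-//	func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
-//		ch <- prometheus.MustNewConstSummary(
-//			c.desc,
-//			4711, 403.34,
-//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
-//			"somelabelvalue",
-//		)
-//	}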
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go
deleted file mode 100644
index 8d2f05500b..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/lint.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "fmt"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/testutil/promlint"
-)
-
-// CollectAndLint registers the provided Collector with a newly created pedantic
-// Registry. It then calls GatherAndLint with that Registry and with the
-// provided metricNames.
-func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) {
- reg := prometheus.NewPedanticRegistry()
- if err := reg.Register(c); err != nil {
- return nil, fmt.Errorf("registering collector failed: %w", err)
- }
- return GatherAndLint(reg, metricNames...)
-}
-
-// GatherAndLint gathers all metrics from the provided Gatherer and checks them
-// with the linter in the promlint package. If any metricNames are provided,
-// only metrics with those names are checked.
-func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) {
- got, err := g.Gather()
- if err != nil {
- return nil, fmt.Errorf("gathering metrics failed: %w", err)
- }
- if metricNames != nil {
- got = filterMetrics(got, metricNames)
- }
- return promlint.NewWithMetricFamilies(got).Lint()
-}
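-
-// A minimal test-side sketch (the collector, metric name, and testing.T are
-// assumed):
-//
-//	problems, err := testutil.CollectAndLint(myCollector, "my_ops_total")
-//	if err != nil {
-//		t.Fatal(err)
-//	}
-//	for _, p := range problems {
-//		t.Errorf("%s: %s", p.Metric, p.Text)
-//	}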
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go
deleted file mode 100644
index 9ba42826ad..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promlint
-
-import dto "github.com/prometheus/client_model/go"
-
-// A Problem is an issue detected by a linter.
-type Problem struct {
- // The name of the metric indicated by this Problem.
- Metric string
-
- // A description of the issue for this Problem.
- Text string
-}
-
-// newProblem is a helper function to create a Problem.
-func newProblem(mf *dto.MetricFamily, text string) Problem {
- return Problem{
- Metric: mf.GetName(),
- Text: text,
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
deleted file mode 100644
index ea46f38ecf..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package promlint provides a linter for Prometheus metrics.
-package promlint
-
-import (
- "errors"
- "io"
- "sort"
-
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/expfmt"
-)
-
-// A Linter is a Prometheus metrics linter. It identifies issues with metric
-// names, types, and metadata, and reports them to the caller.
-type Linter struct {
- // The linter will read metrics in the Prometheus text format from r and
- // then lint it, _and_ it will lint the metrics provided directly as
- // MetricFamily proto messages in mfs. Note, however, that the current
- // constructor functions New and NewWithMetricFamilies only ever set one
- // of them.
- r io.Reader
- mfs []*dto.MetricFamily
-
- customValidations []Validation
-}
-
-// New creates a new Linter that reads an input stream of Prometheus metrics in
-// the Prometheus text exposition format.
-func New(r io.Reader) *Linter {
- return &Linter{
- r: r,
- }
-}
-
-// NewWithMetricFamilies creates a new Linter that reads from a slice of
-// MetricFamily protobuf messages.
-func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter {
- return &Linter{
- mfs: mfs,
- }
-}
-
-// AddCustomValidations adds custom validations to the linter.
-func (l *Linter) AddCustomValidations(vs ...Validation) {
- if l.customValidations == nil {
- l.customValidations = make([]Validation, 0, len(vs))
- }
- l.customValidations = append(l.customValidations, vs...)
-}
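-
-// A hedged example of adding a custom Validation (the rule itself is made up):
-//
-//	linter.AddCustomValidations(func(mf *dto.MetricFamily) []error {
-//		if strings.HasPrefix(mf.GetName(), "tmp_") {
-//			return []error{errors.New(`metric names should not start with "tmp_"`)}
-//		}
-//		return nil
-//	})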
-
-// Lint performs a linting pass, returning a slice of Problems indicating any
-// issues found in the metrics stream. The slice is sorted by metric name
-// and issue description.
-func (l *Linter) Lint() ([]Problem, error) {
- var problems []Problem
-
- if l.r != nil {
- d := expfmt.NewDecoder(l.r, expfmt.NewFormat(expfmt.TypeTextPlain))
-
- mf := &dto.MetricFamily{}
- for {
- if err := d.Decode(mf); err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- return nil, err
- }
-
- problems = append(problems, l.lint(mf)...)
- }
- }
- for _, mf := range l.mfs {
- problems = append(problems, l.lint(mf)...)
- }
-
- // Ensure deterministic output.
- sort.SliceStable(problems, func(i, j int) bool {
- if problems[i].Metric == problems[j].Metric {
- return problems[i].Text < problems[j].Text
- }
- return problems[i].Metric < problems[j].Metric
- })
-
- return problems, nil
-}
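-
-// Sketch: linting metrics supplied as text (the input is illustrative):
-//
-//	input := "# HELP x_total Total.\n# TYPE x_total counter\nx_total 1\n"
-//	problems, err := promlint.New(strings.NewReader(input)).Lint()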
-
-// lint is the entry point for linting a single metric.
-func (l *Linter) lint(mf *dto.MetricFamily) []Problem {
- var problems []Problem
-
- for _, fn := range defaultValidations {
- errs := fn(mf)
- for _, err := range errs {
- problems = append(problems, newProblem(mf, err.Error()))
- }
- }
-
- if l.customValidations != nil {
- for _, fn := range l.customValidations {
- errs := fn(mf)
- for _, err := range errs {
- problems = append(problems, newProblem(mf, err.Error()))
- }
- }
- }
-
-	// TODO(mdlayher): lint rules for specific metric types.
- return problems
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go
deleted file mode 100644
index f52ad9eab6..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package promlint
-
-import (
- dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/testutil/promlint/validations"
-)
-
-type Validation = func(mf *dto.MetricFamily) []error
-
-var defaultValidations = []Validation{
- validations.LintHelp,
- validations.LintMetricUnits,
- validations.LintCounter,
- validations.LintHistogramSummaryReserved,
- validations.LintMetricTypeInName,
- validations.LintReservedChars,
- validations.LintCamelCase,
- validations.LintUnitAbbreviations,
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go
deleted file mode 100644
index f2c2c3905d..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package validations
-
-import (
- "errors"
- "strings"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// LintCounter detects issues specific to counters, as well as patterns that should
-// only be used with counters.
-func LintCounter(mf *dto.MetricFamily) []error {
- var problems []error
-
- isCounter := mf.GetType() == dto.MetricType_COUNTER
- isUntyped := mf.GetType() == dto.MetricType_UNTYPED
- hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total")
-
- switch {
- case isCounter && !hasTotalSuffix:
- problems = append(problems, errors.New(`counter metrics should have "_total" suffix`))
- case !isUntyped && !isCounter && hasTotalSuffix:
- problems = append(problems, errors.New(`non-counter metrics should not have "_total" suffix`))
- }
-
- return problems
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go
deleted file mode 100644
index bc8dbd1e16..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package validations
-
-import (
- "errors"
- "fmt"
- "regexp"
- "strings"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-var camelCase = regexp.MustCompile(`[a-z][A-Z]`)
-
-// LintMetricUnits detects issues with metric unit names.
-func LintMetricUnits(mf *dto.MetricFamily) []error {
- var problems []error
-
-	unit, base, ok := metricUnits(mf.GetName())
- if !ok {
- // No known units detected.
- return nil
- }
-
- // Unit is already a base unit.
- if unit == base {
- return nil
- }
-
- problems = append(problems, fmt.Errorf("use base unit %q instead of %q", base, unit))
-
- return problems
-}
-
-// LintMetricTypeInName detects when metric types are included in the metric name.
-func LintMetricTypeInName(mf *dto.MetricFamily) []error {
- var problems []error
- n := strings.ToLower(mf.GetName())
-
- for i, t := range dto.MetricType_name {
- if i == int32(dto.MetricType_UNTYPED) {
- continue
- }
-
- typename := strings.ToLower(t)
- if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) {
- problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename))
- }
- }
- return problems
-}
-
-// LintReservedChars detects colons in metric names.
-func LintReservedChars(mf *dto.MetricFamily) []error {
- var problems []error
- if strings.Contains(mf.GetName(), ":") {
- problems = append(problems, errors.New("metric names should not contain ':'"))
- }
- return problems
-}
-
-// LintCamelCase detects metric names and label names written in camelCase.
-func LintCamelCase(mf *dto.MetricFamily) []error {
- var problems []error
- if camelCase.FindString(mf.GetName()) != "" {
- problems = append(problems, errors.New("metric names should be written in 'snake_case' not 'camelCase'"))
- }
-
- for _, m := range mf.GetMetric() {
- for _, l := range m.GetLabel() {
- if camelCase.FindString(l.GetName()) != "" {
- problems = append(problems, errors.New("label names should be written in 'snake_case' not 'camelCase'"))
- }
- }
- }
- return problems
-}
-
-// LintUnitAbbreviations detects abbreviated units in the metric name.
-func LintUnitAbbreviations(mf *dto.MetricFamily) []error {
- var problems []error
- n := strings.ToLower(mf.GetName())
- for _, s := range unitAbbreviations {
- if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) {
- problems = append(problems, errors.New("metric names should not contain abbreviated units"))
- }
- }
- return problems
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go
deleted file mode 100644
index 1df2944689..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package validations
-
-import (
- "errors"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// LintHelp detects issues related to the help text for a metric.
-func LintHelp(mf *dto.MetricFamily) []error {
- var problems []error
-
- // Expect all metrics to have help text available.
- if mf.Help == nil {
- problems = append(problems, errors.New("no help text"))
- }
-
- return problems
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go
deleted file mode 100644
index 6564bdf366..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package validations
-
-import (
- "errors"
- "strings"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// LintHistogramSummaryReserved detects when other types of metrics use names or labels
-// reserved for use by histograms and/or summaries.
-func LintHistogramSummaryReserved(mf *dto.MetricFamily) []error {
- // These rules do not apply to untyped metrics.
- t := mf.GetType()
- if t == dto.MetricType_UNTYPED {
- return nil
- }
-
- var problems []error
-
- isHistogram := t == dto.MetricType_HISTOGRAM
- isSummary := t == dto.MetricType_SUMMARY
-
- n := mf.GetName()
-
- if !isHistogram && strings.HasSuffix(n, "_bucket") {
- problems = append(problems, errors.New(`non-histogram metrics should not have "_bucket" suffix`))
- }
- if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") {
- problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_count" suffix`))
- }
- if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") {
- problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_sum" suffix`))
- }
-
- for _, m := range mf.GetMetric() {
- for _, l := range m.GetLabel() {
- ln := l.GetName()
-
- if !isHistogram && ln == "le" {
- problems = append(problems, errors.New(`non-histogram metrics should not have "le" label`))
- }
- if !isSummary && ln == "quantile" {
- problems = append(problems, errors.New(`non-summary metrics should not have "quantile" label`))
- }
- }
- }
-
- return problems
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go
deleted file mode 100644
index 967977d2b0..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package validations
-
-import "strings"
-
-// Units and their possible prefixes recognized by this library. More can be
-// added over time as needed.
-var (
- // map a unit to the appropriate base unit.
- units = map[string]string{
- // Base units.
- "amperes": "amperes",
- "bytes": "bytes",
- "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases.
- "grams": "grams",
- "joules": "joules",
- "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements).
- "meters": "meters", // Both American and international spelling permitted.
- "metres": "metres",
- "seconds": "seconds",
- "volts": "volts",
-
- // Non base units.
- // Time.
- "minutes": "seconds",
- "hours": "seconds",
- "days": "seconds",
- "weeks": "seconds",
- // Temperature.
- "kelvins": "kelvin",
- "fahrenheit": "celsius",
- "rankine": "celsius",
- // Length.
- "inches": "meters",
- "yards": "meters",
- "miles": "meters",
- // Bytes.
- "bits": "bytes",
- // Energy.
- "calories": "joules",
- // Mass.
- "pounds": "grams",
- "ounces": "grams",
- }
-
- unitPrefixes = []string{
- "pico",
- "nano",
- "micro",
- "milli",
- "centi",
- "deci",
- "deca",
- "hecto",
- "kilo",
- "kibi",
- "mega",
- "mibi",
- "giga",
- "gibi",
- "tera",
- "tebi",
- "peta",
- "pebi",
- }
-
- // Common abbreviations that we'd like to discourage.
- unitAbbreviations = []string{
- "s",
- "ms",
- "us",
- "ns",
- "sec",
- "b",
- "kb",
- "mb",
- "gb",
- "tb",
- "pb",
- "m",
- "h",
- "d",
- }
-)
-
-// metricUnits attempts to detect known unit types used as part of a metric name,
-// e.g. "foo_bytes_total" or "bar_baz_milligrams".
-func metricUnits(m string) (unit, base string, ok bool) {
- ss := strings.Split(m, "_")
-
- for _, s := range ss {
- if base, found := units[s]; found {
- return s, base, true
- }
-
- for _, p := range unitPrefixes {
- if strings.HasPrefix(s, p) {
- if base, found := units[s[len(p):]]; found {
- return s, base, true
- }
- }
- }
- }
-
- return "", "", false
-}
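-
-// For example (hedged; behavior inferred from the tables above):
-//
-//	metricUnits("foo_milliseconds_total") // ("milliseconds", "seconds", true)
-//	metricUnits("foo_ratio")              // ("", "", false)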
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
deleted file mode 100644
index 9dce15eafa..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testutil provides helpers to test code using the prometheus package
-// of client_golang.
-//
-// While writing unit tests to verify correct instrumentation of your code, it's
-// a common mistake to mostly test the instrumentation library instead of your
-// own code. Rather than verifying that a prometheus.Counter's value has changed
-// as expected or that it shows up in the exposition after registration, it is
-// in general more robust and more faithful to the concept of unit tests to use
-// mock implementations of the prometheus.Counter and prometheus.Registerer
-// interfaces that simply assert that the Add or Register methods have been
-// called with the expected arguments. However, this might be overkill in simple
-// scenarios. The ToFloat64 function is provided for simple inspection of a
-// single-value metric, but it has to be used with caution.
-//
-// End-to-end tests to verify all or larger parts of the metrics exposition can
-// be implemented with the CollectAndCompare or GatherAndCompare functions. The
-// most appropriate use is not so much testing instrumentation of your code, but
-// testing custom prometheus.Collector implementations and in particular whole
-// exporters, i.e. programs that retrieve telemetry data from a 3rd party source
-// and convert it into Prometheus metrics.
-//
-// In a similar pattern, CollectAndLint and GatherAndLint can be used to detect
-// metrics that have issues with their name, type, or metadata without
-// necessarily being invalid, e.g. a counter with a name missing the “_total” suffix.
-package testutil
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "reflect"
-
- "github.com/davecgh/go-spew/spew"
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/expfmt"
- "google.golang.org/protobuf/proto"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/internal"
-)
-
-// ToFloat64 collects all Metrics from the provided Collector. It expects that
-// this results in exactly one Metric being collected, which must be a Gauge,
-// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns
-// the value of the collected Metric.
-//
-// The Collector provided is typically a simple instance of Gauge or Counter, or
-// – less commonly – a GaugeVec or CounterVec with exactly one element. But any
-// Collector fulfilling the prerequisites described above will do.
-//
-// Use this function with caution. It is computationally very expensive and thus
-// not suited at all to read values from Metrics in regular code. This is really
-// only for testing purposes, and even for testing, other approaches are often
-// more appropriate (see this package's documentation).
-//
-// A clear anti-pattern would be to use a metric type from the prometheus
-// package to track values that are also needed for something else than the
-// exposition of Prometheus metrics. For example, you would like to track the
-// number of items in a queue because your code should reject queuing further
-// items if a certain limit is reached. It is tempting to track the number of
-// items in a prometheus.Gauge, as it is then easily available as a metric for
-// exposition, too. However, then you would need to call ToFloat64 in your
-// regular code, potentially quite often. The recommended way is to track the
-// number of items conventionally (in the way you would have done it without
-// considering Prometheus metrics) and then expose the number with a
-// prometheus.GaugeFunc.
-func ToFloat64(c prometheus.Collector) float64 {
- var (
- m prometheus.Metric
- mCount int
- mChan = make(chan prometheus.Metric)
- done = make(chan struct{})
- )
-
- go func() {
- for m = range mChan {
- mCount++
- }
- close(done)
- }()
-
- c.Collect(mChan)
- close(mChan)
- <-done
-
- if mCount != 1 {
- panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount))
- }
-
- pb := &dto.Metric{}
- if err := m.Write(pb); err != nil {
- panic(fmt.Errorf("error happened while collecting metrics: %w", err))
- }
- if pb.Gauge != nil {
- return pb.Gauge.GetValue()
- }
- if pb.Counter != nil {
- return pb.Counter.GetValue()
- }
- if pb.Untyped != nil {
- return pb.Untyped.GetValue()
- }
- panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb))
-}
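-
-// A minimal sketch in a test (the counter name and testing.T are assumed):
-//
-//	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "ops_total"})
-//	c.Add(3)
-//	if got := testutil.ToFloat64(c); got != 3 {
-//		t.Errorf("want 3, got %v", got)
-//	}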
-
-// CollectAndCount registers the provided Collector with a newly created
-// pedantic Registry. It then calls GatherAndCount with that Registry and with
-// the provided metricNames. In the unlikely case that the registration or the
-// gathering fails, this function panics. (This is inconsistent with the other
-// CollectAnd… functions in this package and has historical reasons. Changing
-// the function signature would be a breaking change and will therefore only
-// happen with the next major version bump.)
-func CollectAndCount(c prometheus.Collector, metricNames ...string) int {
- reg := prometheus.NewPedanticRegistry()
- if err := reg.Register(c); err != nil {
- panic(fmt.Errorf("registering collector failed: %w", err))
- }
- result, err := GatherAndCount(reg, metricNames...)
- if err != nil {
- panic(err)
- }
- return result
-}
-
-// GatherAndCount gathers all metrics from the provided Gatherer and counts
-// them. It returns the number of metric children in all gathered metric
-// families together. If any metricNames are provided, only metrics with those
-// names are counted.
-func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) {
- got, err := g.Gather()
- if err != nil {
- return 0, fmt.Errorf("gathering metrics failed: %w", err)
- }
- if metricNames != nil {
- got = filterMetrics(got, metricNames)
- }
-
- result := 0
- for _, mf := range got {
- result += len(mf.GetMetric())
- }
- return result, nil
-}
-
-// ScrapeAndCompare calls a remote exporter's endpoint, which is expected to
-// return metrics in the Prometheus plain text format, and compares them with
-// the metrics read from `expected`. If `metricNames` is not empty, the
-// comparison is restricted to metrics with those names.
-func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error {
- resp, err := http.Get(url)
- if err != nil {
- return fmt.Errorf("scraping metrics failed: %w", err)
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("the scraping target returned a status code other than 200: %d",
- resp.StatusCode)
- }
-
- scraped, err := convertReaderToMetricFamily(resp.Body)
- if err != nil {
- return err
- }
-
- wanted, err := convertReaderToMetricFamily(expected)
- if err != nil {
- return err
- }
-
- return compareMetricFamilies(scraped, wanted, metricNames...)
-}
-
-// CollectAndCompare registers the provided Collector with a newly created
-// pedantic Registry. It then calls GatherAndCompare with that Registry and with
-// the provided metricNames.
-func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error {
- reg := prometheus.NewPedanticRegistry()
- if err := reg.Register(c); err != nil {
- return fmt.Errorf("registering collector failed: %w", err)
- }
- return GatherAndCompare(reg, expected, metricNames...)
-}
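-
-// Usage sketch (the collector and expected exposition text are illustrative):
-//
-//	expected := "# HELP ops_total Total ops.\n# TYPE ops_total counter\nops_total 3\n"
-//	if err := testutil.CollectAndCompare(c, strings.NewReader(expected)); err != nil {
-//		t.Error(err)
-//	}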
-
-// GatherAndCompare gathers all metrics from the provided Gatherer and compares
-// them to an expected output read from the provided Reader in the Prometheus text
-// exposition format. If any metricNames are provided, only metrics with those
-// names are compared.
-func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error {
- return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...)
-}
-
-// TransactionalGatherAndCompare gathers all metrics from the provided
-// TransactionalGatherer and compares them to an expected output read from the
-// provided Reader in the Prometheus text
-// exposition format. If any metricNames are provided, only metrics with those
-// names are compared.
-func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error {
- got, done, err := g.Gather()
- defer done()
- if err != nil {
- return fmt.Errorf("gathering metrics failed: %w", err)
- }
-
- wanted, err := convertReaderToMetricFamily(expected)
- if err != nil {
- return err
- }
-
- return compareMetricFamilies(got, wanted, metricNames...)
-}
-
-// convertReaderToMetricFamily reads from an io.Reader and converts the input
-// into a slice of dto.MetricFamily.
-func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) {
- var tp expfmt.TextParser
- notNormalized, err := tp.TextToMetricFamilies(reader)
- if err != nil {
- return nil, fmt.Errorf("converting reader to metric families failed: %w", err)
- }
-
- // The text protocol handles empty help fields inconsistently. When
-	// encoding, any non-nil value, including the empty string, produces a
- // "# HELP" line. But when decoding, the help field is only set to a
- // non-nil value if the "# HELP" line contains a non-empty value.
- //
- // Because metrics in a registry always have non-nil help fields, populate
- // any nil help fields in the parsed metrics with the empty string so that
- // when we compare text encodings, the results are consistent.
- for _, metric := range notNormalized {
- if metric.Help == nil {
- metric.Help = proto.String("")
- }
- }
-
- return internal.NormalizeMetricFamilies(notNormalized), nil
-}
-
-// compareMetricFamilies compares two slices of metric families, optionally
-// filtering both to the provided `metricNames`.
-func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...string) error {
- if metricNames != nil {
- got = filterMetrics(got, metricNames)
- expected = filterMetrics(expected, metricNames)
- }
-
- return compare(got, expected)
-}
-
-// compare encodes both provided slices of metric families into the text format,
-// compares their string message, and returns an error if they do not match.
-// The error contains the encoded text of both the desired and the actual
-// result.
-func compare(got, want []*dto.MetricFamily) error {
- var gotBuf, wantBuf bytes.Buffer
- enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain))
- for _, mf := range got {
- if err := enc.Encode(mf); err != nil {
- return fmt.Errorf("encoding gathered metrics failed: %w", err)
- }
- }
- enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain))
- for _, mf := range want {
- if err := enc.Encode(mf); err != nil {
- return fmt.Errorf("encoding expected metrics failed: %w", err)
- }
- }
- if diffErr := diff(wantBuf, gotBuf); diffErr != "" {
-		// Quote via %s so that any '%' in the diff output is not
-		// misinterpreted as a formatting directive.
-		return fmt.Errorf("%s", diffErr)
- }
- return nil
-}
-
-// diff returns a diff of both values as long as both are of the same type and
-// are a struct, map, slice, array or string. Otherwise it returns an empty string.
-func diff(expected, actual interface{}) string {
- if expected == nil || actual == nil {
- return ""
- }
-
- et, ek := typeAndKind(expected)
- at, _ := typeAndKind(actual)
- if et != at {
- return ""
- }
-
- if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
- return ""
- }
-
- var e, a string
- c := spew.ConfigState{
- Indent: " ",
- DisablePointerAddresses: true,
- DisableCapacities: true,
- SortKeys: true,
- }
- if et != reflect.TypeOf("") {
- e = c.Sdump(expected)
- a = c.Sdump(actual)
- } else {
- e = reflect.ValueOf(expected).String()
- a = reflect.ValueOf(actual).String()
- }
-
- diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{
- A: internal.SplitLines(e),
- B: internal.SplitLines(a),
- FromFile: "metric output does not match expectation; want",
- FromDate: "",
- ToFile: "got:",
- ToDate: "",
- Context: 1,
- })
-
- if diff == "" {
- return ""
- }
-
- return "\n\nDiff:\n" + diff
-}
-
-// typeAndKind returns the type and kind of the given interface{}
-func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
- t := reflect.TypeOf(v)
- k := t.Kind()
-
- if k == reflect.Ptr {
- t = t.Elem()
- k = t.Kind()
- }
- return t, k
-}
-
-func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily {
- var filtered []*dto.MetricFamily
- for _, m := range metrics {
- for _, name := range names {
- if m.GetName() == name {
- filtered = append(filtered, m)
- break
- }
- }
- }
- return filtered
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
deleted file mode 100644
index 52344fef53..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "time"
-
-// Timer is a helper type to time functions. Use NewTimer to create new
-// instances.
-type Timer struct {
- begin time.Time
- observer Observer
-}
-
-// NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. If the Observer implements ExemplarObserver, passing an
-// exemplar later on is also supported.
-// Timer is usually used to time a function call in the
-// following way:
-//
-// func TimeMe() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDuration()
-// // Do actual work.
-// }
-//
-// or
-//
-// func TimeMeWithExemplar() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDurationWithExemplar(exemplar)
-// // Do actual work.
-// }
-func NewTimer(o Observer) *Timer {
- return &Timer{
- begin: time.Now(),
- observer: o,
- }
-}
-
-// ObserveDuration records the duration passed since the Timer was created with
-// NewTimer. It calls the Observe method of the Observer provided during
-// construction with the duration in seconds as an argument. The observed
-// duration is also returned. ObserveDuration is usually called with a defer
-// statement.
-//
-// Note that this method is only guaranteed to never observe negative durations
-// if used with Go 1.9+.
-func (t *Timer) ObserveDuration() time.Duration {
- d := time.Since(t.begin)
- if t.observer != nil {
- t.observer.Observe(d.Seconds())
- }
- return d
-}
-
-// ObserveDurationWithExemplar is like ObserveDuration, but it will also
-// observe the provided exemplar with the duration, unless the exemplar is nil
-// or the provided Observer cannot be cast to an ExemplarObserver.
-func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
- d := time.Since(t.begin)
- eo, ok := t.observer.(ExemplarObserver)
- if ok && exemplar != nil {
- eo.ObserveWithExemplar(d.Seconds(), exemplar)
- return d
- }
- if t.observer != nil {
- t.observer.Observe(d.Seconds())
- }
- return d
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
deleted file mode 100644
index 0f9ce63f40..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// UntypedOpts is an alias for Opts. See there for doc comments.
-type UntypedOpts Opts
-
-// UntypedFunc works like GaugeFunc but the collected metric is of type
-// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
-// type.
-//
-// To create UntypedFunc instances, use NewUntypedFunc.
-type UntypedFunc interface {
- Metric
- Collector
-}
-
-// NewUntypedFunc creates a new UntypedFunc based on the provided
-// UntypedOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where an UntypedFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), UntypedValue, function)
-}
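-
-// Sketch: mirroring an external value of unknown type (readExternalValue is a
-// hypothetical source function):
-//
-//	uf := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
-//		Name: "external_value",
-//		Help: "Value mirrored from an external system.",
-//	}, func() float64 { return readExternalValue() })
-//	prometheus.MustRegister(uf)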
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
deleted file mode 100644
index cc23011fad..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "sort"
- "time"
- "unicode/utf8"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// ValueType is an enumeration of metric types that represent a simple value.
-type ValueType int
-
-// Possible values for the ValueType enum. Use UntypedValue to mark a metric
-// with an unknown type.
-const (
- _ ValueType = iota
- CounterValue
- GaugeValue
- UntypedValue
-)
-
-var (
- CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
- GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
- UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
-)
-
-func (v ValueType) ToDTO() *dto.MetricType {
- switch v {
- case CounterValue:
- return CounterMetricTypePtr
- case GaugeValue:
- return GaugeMetricTypePtr
- default:
- return UntypedMetricTypePtr
- }
-}
-
-// valueFunc is a generic metric for simple values retrieved on collect time
-// from a function. It implements Metric and Collector. Its effective type is
-// determined by ValueType. This is a low-level building block used by the
-// library to back the implementations of CounterFunc, GaugeFunc, and
-// UntypedFunc.
-type valueFunc struct {
- selfCollector
-
- desc *Desc
- valType ValueType
- function func() float64
- labelPairs []*dto.LabelPair
-}
-
-// newValueFunc returns a newly allocated valueFunc with the given Desc and
-// ValueType. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where a valueFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
- result := &valueFunc{
- desc: desc,
- valType: valueType,
- function: function,
- labelPairs: MakeLabelPairs(desc, nil),
- }
- result.init(result)
- return result
-}
-
-func (v *valueFunc) Desc() *Desc {
- return v.desc
-}
-
-func (v *valueFunc) Write(out *dto.Metric) error {
- return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
-}
-
-// NewConstMetric returns a metric with one fixed value that cannot be
-// changed. Users of this package will not have much use for it in regular
-// operations. However, when implementing custom Collectors, it is useful as a
-// throw-away metric that is generated on the fly to send it to Prometheus in
-// the Collect method. NewConstMetric returns an error if the length of
-// labelValues is not consistent with the variable labels in Desc or if Desc is
-// invalid.
-func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
-
- metric := &dto.Metric{}
- if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
- return nil, err
- }
-
- return &constMetric{
- desc: desc,
- metric: metric,
- }, nil
-}
-
-// MustNewConstMetric is a version of NewConstMetric that panics where
-// NewConstMetric would have returned an error.
-func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
- m, err := NewConstMetric(desc, valueType, value, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
-
-// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
-// with created timestamp set and returns an error for other metric types.
-func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
- return nil, err
- }
- switch valueType {
- case CounterValue:
- break
- default:
- return nil, errors.New("created timestamps are only supported for counters")
- }
-
- metric := &dto.Metric{}
- if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
- return nil, err
- }
-
- return &constMetric{
- desc: desc,
- metric: metric,
- }, nil
-}
-
-// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
-// NewConstMetricWithCreatedTimestamp would have returned an error.
-func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
- m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
-
-type constMetric struct {
- desc *Desc
- metric *dto.Metric
-}
-
-func (m *constMetric) Desc() *Desc {
- return m.desc
-}
-
-func (m *constMetric) Write(out *dto.Metric) error {
- out.Label = m.metric.Label
- out.Counter = m.metric.Counter
- out.Gauge = m.metric.Gauge
- out.Untyped = m.metric.Untyped
- return nil
-}
-
-func populateMetric(
- t ValueType,
- v float64,
- labelPairs []*dto.LabelPair,
- e *dto.Exemplar,
- m *dto.Metric,
- ct *timestamppb.Timestamp,
-) error {
- m.Label = labelPairs
- switch t {
- case CounterValue:
- m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
- case GaugeValue:
- m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
- case UntypedValue:
- m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
- default:
- return fmt.Errorf("encountered unknown type %v", t)
- }
- return nil
-}
-
-// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
-// variable and constant labels in the provided Desc. The values for the
-// variable labels are defined by the labelValues slice, which must be in the
-// same order as the corresponding variable labels in the Desc.
-//
-// This function is only needed for custom Metric implementations. See MetricVec
-// example.
-func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
- totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
- if totalLen == 0 {
- // Super fast path.
- return nil
- }
- if len(desc.variableLabels.names) == 0 {
- // Moderately fast path.
- return desc.constLabelPairs
- }
- labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, l := range desc.variableLabels.names {
- labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(l),
- Value: proto.String(labelValues[i]),
- })
- }
- labelPairs = append(labelPairs, desc.constLabelPairs...)
- sort.Sort(internal.LabelPairSorter(labelPairs))
- return labelPairs
-}
-
-// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
-const ExemplarMaxRunes = 128
-
-// newExemplar creates a new dto.Exemplar from the provided values. An error is
-// returned if any of the label names or values are invalid or if the total
-// number of runes in the label names and values exceeds ExemplarMaxRunes.
-func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
- e := &dto.Exemplar{}
- e.Value = proto.Float64(value)
- tsProto := timestamppb.New(ts)
- if err := tsProto.CheckValid(); err != nil {
- return nil, err
- }
- e.Timestamp = tsProto
- labelPairs := make([]*dto.LabelPair, 0, len(l))
- var runes int
- for name, value := range l {
- if !checkLabelName(name) {
- return nil, fmt.Errorf("exemplar label name %q is invalid", name)
- }
- runes += utf8.RuneCountInString(name)
- if !utf8.ValidString(value) {
- return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
- }
- runes += utf8.RuneCountInString(value)
- labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(name),
- Value: proto.String(value),
- })
- }
- if runes > ExemplarMaxRunes {
- return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
- }
- e.Label = labelPairs
- return e, nil
-}
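The const-metric helpers above exist mainly for custom Collectors. A minimal sketch of that pattern, assuming only the exported API of this package (NewDesc, MustNewConstMetric, GaugeValue); the collector type, metric name, and queueLength helper are made up for illustration:

package example

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	desc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		desc: prometheus.NewDesc(
			"example_queue_length", // fully-qualified metric name (illustrative)
			"Current length of the example queue.",
			[]string{"queue"}, // one variable label
			nil,               // no const labels
		),
	}
}

// Describe implements prometheus.Collector.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

// Collect implements prometheus.Collector. A fresh throw-away metric is built
// on every scrape, as the NewConstMetric documentation suggests.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength("high"), "high")
}

// queueLength is a hypothetical stand-in for the real instrumentation source.
func queueLength(queue string) float64 { return 42 }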
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
deleted file mode 100644
index 955cfd59f8..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ /dev/null
@@ -1,709 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sync"
-
- "github.com/prometheus/common/model"
-)
-
-// MetricVec is a Collector to bundle metrics of the same name that differ in
-// their label values. MetricVec is not used directly but as a building block
-// for implementations of vectors of a given metric type, like GaugeVec,
-// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
-// used for custom Metric implementations.
-//
-// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
-// FooVec and initialize it with NewMetricVec. Implement wrappers for
-// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
-// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
-// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
-// add the convenience methods WithLabelValues, With, and MustCurryWith, which
-// panic instead of returning errors. See also the MetricVec example.
-type MetricVec struct {
- *metricMap
-
- curry []curriedLabelValue
-
- // hashAdd and hashAddByte can be replaced for testing collision handling.
- hashAdd func(h uint64, s string) uint64
- hashAddByte func(h uint64, b byte) uint64
-}
-
-// NewMetricVec returns an initialized MetricVec.
-func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
- return &MetricVec{
- metricMap: &metricMap{
- metrics: map[uint64][]metricWithLabelValues{},
- desc: desc,
- newMetric: newMetric,
- },
- hashAdd: hashAdd,
- hashAddByte: hashAddByte,
- }
-}
-
-// DeleteLabelValues removes the metric where the variable labels are the same
-// as those passed in as labels (same order as the VariableLabels in Desc). It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
- lvs = constrainLabelValues(m.desc, lvs, m.curry)
-
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return false
- }
-
- return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
-}
-
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc. However, such inconsistent Labels
-// can never match an actual metric, so the method will always return false in
-// that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
- labels, closer := constrainLabels(m.desc, labels)
- defer closer()
-
- h, err := m.hashLabels(labels)
- if err != nil {
- return false
- }
-
- return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
-}
-
-// DeletePartialMatch deletes all metrics where the variable labels contain all of those
-// passed in as labels. The order of the labels does not matter.
-// It returns the number of metrics deleted.
-//
-// Note that curried labels will never be matched if deleting from the curried vector.
-// To match curried labels with DeletePartialMatch, it must be called on the base vector.
-func (m *MetricVec) DeletePartialMatch(labels Labels) int {
- labels, closer := constrainLabels(m.desc, labels)
- defer closer()
-
- return m.metricMap.deleteByLabels(labels, m.curry)
-}
-
-// Without explicit forwarding of Describe, Collect, Reset, those methods won't
-// show up in GoDoc.
-
-// Describe implements Collector.
-func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
-
-// Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
-
-// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() { m.metricMap.Reset() }
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the MetricVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-//
-// Note that CurryWith is usually not called directly but through a wrapper
-// around MetricVec, implementing a vector for a specific Metric
-// implementation, for example GaugeVec.
-func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
- var (
- newCurry []curriedLabelValue
- oldCurry = m.curry
- iCurry int
- )
- for i, labelName := range m.desc.variableLabels.names {
- val, ok := labels[labelName]
- if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
- if ok {
- return nil, fmt.Errorf("label name %q is already curried", labelName)
- }
- newCurry = append(newCurry, oldCurry[iCurry])
- iCurry++
- } else {
- if !ok {
- continue // Label stays uncurried.
- }
- newCurry = append(newCurry, curriedLabelValue{
- i,
- m.desc.variableLabels.constrain(labelName, val),
- })
- }
- }
- if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
- return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
- }
-
- return &MetricVec{
- metricMap: m.metricMap,
- curry: newCurry,
- hashAdd: m.hashAdd,
- hashAddByte: m.hashAddByte,
- }, nil
-}
-
-// GetMetricWithLabelValues returns the Metric for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Metric is created (by
-// calling the newMetric function provided during construction of the
-// MetricVec).
-//
-// It is possible to call this method without using the returned Metric to only
-// create the new Metric but leave it in its initial state.
-//
-// Keeping the Metric for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Metric from the MetricVec. In that case, the
-// Metric will still exist, but it will not be exported anymore, even if a
-// Metric with the same label values is created later.
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-//
-// Note that GetMetricWithLabelValues is usually not called directly but through
-// a wrapper around MetricVec, implementing a vector for a specific Metric
-// implementation, for example GaugeVec.
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
- lvs = constrainLabelValues(m.desc, lvs, m.curry)
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return nil, err
- }
-
- return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
-}
-
-// GetMetricWith returns the Metric for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Metric is created. Implications of
-// creating a Metric without using it and keeping the Metric for later use
-// are the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-//
-// Note that GetMetricWith is usually not called directly but through a wrapper
-// around MetricVec, implementing a vector for a specific Metric implementation,
-// for example GaugeVec.
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
- labels, closer := constrainLabels(m.desc, labels)
- defer closer()
-
- h, err := m.hashLabels(labels)
- if err != nil {
- return nil, err
- }
-
- return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
-}
-
-func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
- return 0, err
- }
-
- var (
- h = hashNew()
- curry = m.curry
- iVals, iCurry int
- )
- for i := 0; i < len(m.desc.variableLabels.names); i++ {
- if iCurry < len(curry) && curry[iCurry].index == i {
- h = m.hashAdd(h, curry[iCurry].value)
- iCurry++
- } else {
- h = m.hashAdd(h, vals[iVals])
- iVals++
- }
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
-func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
- return 0, err
- }
-
- var (
- h = hashNew()
- curry = m.curry
- iCurry int
- )
- for i, labelName := range m.desc.variableLabels.names {
- val, ok := labels[labelName]
- if iCurry < len(curry) && curry[iCurry].index == i {
- if ok {
- return 0, fmt.Errorf("label name %q is already curried", labelName)
- }
- h = m.hashAdd(h, curry[iCurry].value)
- iCurry++
- } else {
- if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", labelName)
- }
- h = m.hashAdd(h, val)
- }
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
-// metricWithLabelValues provides the metric and its label values for
-// disambiguation on hash collision.
-type metricWithLabelValues struct {
- values []string
- metric Metric
-}
-
-// curriedLabelValue sets the curried value for a label at the given index.
-type curriedLabelValue struct {
- index int
- value string
-}
-
-// metricMap is a helper for MetricVec and shared between differently curried
-// MetricVecs.
-type metricMap struct {
- mtx sync.RWMutex // Protects metrics.
- metrics map[uint64][]metricWithLabelValues
- desc *Desc
- newMetric func(labelValues ...string) Metric
-}
-
-// Describe implements Collector. It will send exactly one Desc to the provided
-// channel.
-func (m *metricMap) Describe(ch chan<- *Desc) {
- ch <- m.desc
-}
-
-// Collect implements Collector.
-func (m *metricMap) Collect(ch chan<- Metric) {
- m.mtx.RLock()
- defer m.mtx.RUnlock()
-
- for _, metrics := range m.metrics {
- for _, metric := range metrics {
- ch <- metric.metric
- }
- }
-}
-
-// Reset deletes all metrics in this vector.
-func (m *metricMap) Reset() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- for h := range m.metrics {
- delete(m.metrics, h)
- }
-}
-
-// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
-// there are multiple matches in the bucket, use lvs to select a metric and
-// remove only that metric.
-func (m *metricMap) deleteByHashWithLabelValues(
- h uint64, lvs []string, curry []curriedLabelValue,
-) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- metrics, ok := m.metrics[h]
- if !ok {
- return false
- }
-
- i := findMetricWithLabelValues(metrics, lvs, curry)
- if i >= len(metrics) {
- return false
- }
-
- if len(metrics) > 1 {
- old := metrics
- m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
- old[len(old)-1] = metricWithLabelValues{}
- } else {
- delete(m.metrics, h)
- }
- return true
-}
-
-// deleteByHashWithLabels removes the metric from the hash bucket h. If there
-// are multiple matches in the bucket, use labels to select a metric and remove
-// only that metric.
-func (m *metricMap) deleteByHashWithLabels(
- h uint64, labels Labels, curry []curriedLabelValue,
-) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- metrics, ok := m.metrics[h]
- if !ok {
- return false
- }
- i := findMetricWithLabels(m.desc, metrics, labels, curry)
- if i >= len(metrics) {
- return false
- }
-
- if len(metrics) > 1 {
- old := metrics
- m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
- old[len(old)-1] = metricWithLabelValues{}
- } else {
- delete(m.metrics, h)
- }
- return true
-}
-
-// deleteByLabels deletes all metrics whose variable labels contain the given
-// labels and returns the number of metrics deleted.
-func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- var numDeleted int
-
- for h, metrics := range m.metrics {
- i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
- if i >= len(metrics) {
- // Didn't find matching labels in this metric slice.
- continue
- }
- delete(m.metrics, h)
- numDeleted++
- }
-
- return numDeleted
-}
-
-// findMetricWithPartialLabels returns the index of the matching metric or
-// len(metrics) if not found.
-func findMetricWithPartialLabels(
- desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
-) int {
- for i, metric := range metrics {
- if matchPartialLabels(desc, metric.values, labels, curry) {
- return i
- }
- }
- return len(metrics)
-}
-
-// indexOf searches the given slice of strings for the target string. It
-// returns the target's index and true if found, or len(items) and false
-// otherwise.
-func indexOf(target string, items []string) (int, bool) {
- for i, l := range items {
- if l == target {
- return i, true
- }
- }
- return len(items), false
-}
-
-// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
-// and returns whether it matches either the "base" value or the curried value accordingly.
-// It also indicates whether the match is against a curried or uncurried value.
-func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
- for _, curriedValue := range curry {
- if curriedValue.index == index {
- // This label was curried. See if the curried value matches our target.
- return curriedValue.value == targetValue, true
- }
- }
- // This label was not curried. See if the current value matches our target label.
- return values[index] == targetValue, false
-}
-
-// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
-func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
- for l, v := range labels {
- // Check if the target label exists in our metrics and get the index.
- varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
- if validLabel {
- // Check the value of that label against the target value.
- // We don't consider curried values in partial matches.
- matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
- if matches && !curried {
- continue
- }
- }
- return false
- }
- return true
-}
-
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
-// or creates it and returns the new one.
-//
-// This function holds the mutex.
-func (m *metricMap) getOrCreateMetricWithLabelValues(
- hash uint64, lvs []string, curry []curriedLabelValue,
-) Metric {
- m.mtx.RLock()
- metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
- m.mtx.RUnlock()
- if ok {
- return metric
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
- if !ok {
- inlinedLVs := inlineLabelValues(lvs, curry)
- metric = m.newMetric(inlinedLVs...)
- m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
- }
- return metric
-}
-
-// getOrCreateMetricWithLabels retrieves the metric by hash and labels
-// or creates it and returns the new one.
-//
-// This function holds the mutex.
-func (m *metricMap) getOrCreateMetricWithLabels(
- hash uint64, labels Labels, curry []curriedLabelValue,
-) Metric {
- m.mtx.RLock()
- metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
- m.mtx.RUnlock()
- if ok {
- return metric
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
- if !ok {
- lvs := extractLabelValues(m.desc, labels, curry)
- metric = m.newMetric(lvs...)
- m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
- }
- return metric
-}
-
-// getMetricWithHashAndLabelValues gets a metric while handling possible
-// collisions in the hash space. Must be called while holding the read mutex.
-func (m *metricMap) getMetricWithHashAndLabelValues(
- h uint64, lvs []string, curry []curriedLabelValue,
-) (Metric, bool) {
- metrics, ok := m.metrics[h]
- if ok {
- if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
- return metrics[i].metric, true
- }
- }
- return nil, false
-}
-
-// getMetricWithHashAndLabels gets a metric while handling possible collisions in
-// the hash space. Must be called while holding read mutex.
-func (m *metricMap) getMetricWithHashAndLabels(
- h uint64, labels Labels, curry []curriedLabelValue,
-) (Metric, bool) {
- metrics, ok := m.metrics[h]
- if ok {
- if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
- return metrics[i].metric, true
- }
- }
- return nil, false
-}
-
-// findMetricWithLabelValues returns the index of the matching metric or
-// len(metrics) if not found.
-func findMetricWithLabelValues(
- metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
-) int {
- for i, metric := range metrics {
- if matchLabelValues(metric.values, lvs, curry) {
- return i
- }
- }
- return len(metrics)
-}
-
-// findMetricWithLabels returns the index of the matching metric or len(metrics)
-// if not found.
-func findMetricWithLabels(
- desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
-) int {
- for i, metric := range metrics {
- if matchLabels(desc, metric.values, labels, curry) {
- return i
- }
- }
- return len(metrics)
-}
-
-func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
- if len(values) != len(lvs)+len(curry) {
- return false
- }
- var iLVs, iCurry int
- for i, v := range values {
- if iCurry < len(curry) && curry[iCurry].index == i {
- if v != curry[iCurry].value {
- return false
- }
- iCurry++
- continue
- }
- if v != lvs[iLVs] {
- return false
- }
- iLVs++
- }
- return true
-}
-
-func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
- if len(values) != len(labels)+len(curry) {
- return false
- }
- iCurry := 0
- for i, k := range desc.variableLabels.names {
- if iCurry < len(curry) && curry[iCurry].index == i {
- if values[i] != curry[iCurry].value {
- return false
- }
- iCurry++
- continue
- }
- if values[i] != labels[k] {
- return false
- }
- }
- return true
-}
-
-func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
- labelValues := make([]string, len(labels)+len(curry))
- iCurry := 0
- for i, k := range desc.variableLabels.names {
- if iCurry < len(curry) && curry[iCurry].index == i {
- labelValues[i] = curry[iCurry].value
- iCurry++
- continue
- }
- labelValues[i] = labels[k]
- }
- return labelValues
-}
-
-func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
- labelValues := make([]string, len(lvs)+len(curry))
- var iCurry, iLVs int
- for i := range labelValues {
- if iCurry < len(curry) && curry[iCurry].index == i {
- labelValues[i] = curry[iCurry].value
- iCurry++
- continue
- }
- labelValues[i] = lvs[iLVs]
- iLVs++
- }
- return labelValues
-}
-
-var labelsPool = &sync.Pool{
- New: func() interface{} {
- return make(Labels)
- },
-}
-
-func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
- if len(desc.variableLabels.labelConstraints) == 0 {
-		// Fast path when there are no constraints.
- return labels, func() {}
- }
-
- constrainedLabels := labelsPool.Get().(Labels)
- for l, v := range labels {
- constrainedLabels[l] = desc.variableLabels.constrain(l, v)
- }
-
- return constrainedLabels, func() {
- for k := range constrainedLabels {
- delete(constrainedLabels, k)
- }
- labelsPool.Put(constrainedLabels)
- }
-}
-
-func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
- if len(desc.variableLabels.labelConstraints) == 0 {
-		// Fast path when there are no constraints.
- return lvs
- }
-
- constrainedValues := make([]string, len(lvs))
- var iCurry, iLVs int
- for i := 0; i < len(lvs)+len(curry); i++ {
- if iCurry < len(curry) && curry[iCurry].index == i {
- iCurry++
- continue
- }
-
- if i < len(desc.variableLabels.names) {
- constrainedValues[iLVs] = desc.variableLabels.constrain(
- desc.variableLabels.names[i],
- lvs[iLVs],
- )
- } else {
- constrainedValues[iLVs] = lvs[iLVs]
- }
- iLVs++
- }
- return constrainedValues
-}
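MetricVec is rarely used directly; the documented operations are normally reached through the typed vectors built on top of it. A short sketch going through CounterVec (which embeds *MetricVec, so Delete, DeletePartialMatch, and currying are promoted); metric and label names are illustrative:

package example

import "github.com/prometheus/client_golang/prometheus"

func vecUsageSketch() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total",
			Help: "Requests processed, partitioned by handler and method.",
		},
		[]string{"handler", "method"},
	)

	// WithLabelValues goes through GetMetricWithLabelValues; the argument
	// order must match the variable labels declared above.
	requests.WithLabelValues("/api", "GET").Inc()

	// MustCurryWith pre-sets one label. The curried vector shares its metrics
	// with the base vector; only the remaining label is supplied later.
	apiRequests := requests.MustCurryWith(prometheus.Labels{"handler": "/api"})
	apiRequests.WithLabelValues("POST").Inc()

	// Delete individual children, or every child whose labels contain the
	// given set (DeletePartialMatch must be called on the base vector to
	// match curried labels).
	requests.DeleteLabelValues("/api", "GET")
	requests.DeletePartialMatch(prometheus.Labels{"handler": "/api"})
}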
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
deleted file mode 100644
index 42bc3a8f06..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-type v2 struct{}
-
-// V2 is a struct that can be referenced to access the experimental API that
-// might be present in v2 of client_golang someday. It offers extended
-// functionality over v1 with a slightly changed API. It is acceptable to use
-// some pieces from v1, e.g. `prometheus.NewGauge`, and some from v2, e.g.
-// `prometheus.V2.NewDesc`, in the same codebase.
-var V2 = v2{}
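A minimal sketch of mixing the two API generations, assuming V2.NewDesc takes (fqName, help, variableLabels, constLabels) as in the wrapDesc call further below, and that UnconstrainedLabels adapts a plain []string to the variable-labels parameter (both are assumptions about the surrounding release):

package example

import "github.com/prometheus/client_golang/prometheus"

func mixedAPISketch() {
	// v1 constructor.
	g := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_temperature_celsius",
		Help: "Current temperature.",
	})
	g.Set(21.5)

	// V2 Desc constructor; assumption: UnconstrainedLabels wraps a []string.
	desc := prometheus.V2.NewDesc(
		"example_items_total",
		"Items processed.",
		prometheus.UnconstrainedLabels([]string{"kind"}),
		nil,
	)
	_ = desc
}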
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
deleted file mode 100644
index 25da157f15..0000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sort"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
-)
-
-// WrapRegistererWith returns a Registerer wrapping the provided
-// Registerer. Collectors registered with the returned Registerer will be
-// registered with the wrapped Registerer in a modified way. The modified
-// Collector adds the provided Labels to all Metrics it collects (as
-// ConstLabels). The Metrics collected by the unmodified Collector must not
-// duplicate any of those labels. Wrapping a nil value is valid, resulting
-// in a no-op Registerer.
-//
-// WrapRegistererWith provides a way to add fixed labels to a subset of
-// Collectors. It should not be used to add fixed labels to all metrics
-// exposed. See also
-// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
-//
-// Conflicts between Collectors registered through the original Registerer with
-// Collectors registered through the wrapping Registerer will still be
-// detected. Any AlreadyRegisteredError returned by the Register method of
-// either Registerer will contain the ExistingCollector in the form it was
-// provided to the respective registry.
-//
-// The Collector example demonstrates a use of WrapRegistererWith.
-func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
- return &wrappingRegisterer{
- wrappedRegisterer: reg,
- labels: labels,
- }
-}
-
-// WrapRegistererWithPrefix returns a Registerer wrapping the provided
-// Registerer. Collectors registered with the returned Registerer will be
-// registered with the wrapped Registerer in a modified way. The modified
-// Collector adds the provided prefix to the name of all Metrics it collects.
-// Wrapping a nil value is valid, resulting in a no-op Registerer.
-//
-// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
-// a sub-system. To make this work, register metrics of the sub-system with the
-// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
-// to use the same prefix for all metrics exposed. In particular, do not prefix
-// metric names that are standardized across applications, as that would break
-// horizontal monitoring, for example the metrics provided by the Go collector
-// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
-// respectively.)
-//
-// Conflicts between Collectors registered through the original Registerer with
-// Collectors registered through the wrapping Registerer will still be
-// detected. Any AlreadyRegisteredError returned by the Register method of
-// either Registerer will contain the ExistingCollector in the form it was
-// provided to the respective registry.
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
- return &wrappingRegisterer{
- wrappedRegisterer: reg,
- prefix: prefix,
- }
-}
-
-type wrappingRegisterer struct {
- wrappedRegisterer Registerer
- prefix string
- labels Labels
-}
-
-func (r *wrappingRegisterer) Register(c Collector) error {
- if r.wrappedRegisterer == nil {
- return nil
- }
- return r.wrappedRegisterer.Register(&wrappingCollector{
- wrappedCollector: c,
- prefix: r.prefix,
- labels: r.labels,
- })
-}
-
-func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
- if r.wrappedRegisterer == nil {
- return
- }
- for _, c := range cs {
- if err := r.Register(c); err != nil {
- panic(err)
- }
- }
-}
-
-func (r *wrappingRegisterer) Unregister(c Collector) bool {
- if r.wrappedRegisterer == nil {
- return false
- }
- return r.wrappedRegisterer.Unregister(&wrappingCollector{
- wrappedCollector: c,
- prefix: r.prefix,
- labels: r.labels,
- })
-}
-
-type wrappingCollector struct {
- wrappedCollector Collector
- prefix string
- labels Labels
-}
-
-func (c *wrappingCollector) Collect(ch chan<- Metric) {
- wrappedCh := make(chan Metric)
- go func() {
- c.wrappedCollector.Collect(wrappedCh)
- close(wrappedCh)
- }()
- for m := range wrappedCh {
- ch <- &wrappingMetric{
- wrappedMetric: m,
- prefix: c.prefix,
- labels: c.labels,
- }
- }
-}
-
-func (c *wrappingCollector) Describe(ch chan<- *Desc) {
- wrappedCh := make(chan *Desc)
- go func() {
- c.wrappedCollector.Describe(wrappedCh)
- close(wrappedCh)
- }()
- for desc := range wrappedCh {
- ch <- wrapDesc(desc, c.prefix, c.labels)
- }
-}
-
-func (c *wrappingCollector) unwrapRecursively() Collector {
- switch wc := c.wrappedCollector.(type) {
- case *wrappingCollector:
- return wc.unwrapRecursively()
- default:
- return wc
- }
-}
-
-type wrappingMetric struct {
- wrappedMetric Metric
- prefix string
- labels Labels
-}
-
-func (m *wrappingMetric) Desc() *Desc {
- return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
-}
-
-func (m *wrappingMetric) Write(out *dto.Metric) error {
- if err := m.wrappedMetric.Write(out); err != nil {
- return err
- }
- if len(m.labels) == 0 {
- // No wrapping labels.
- return nil
- }
- for ln, lv := range m.labels {
- out.Label = append(out.Label, &dto.LabelPair{
- Name: proto.String(ln),
- Value: proto.String(lv),
- })
- }
- sort.Sort(internal.LabelPairSorter(out.Label))
- return nil
-}
-
-func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
- constLabels := Labels{}
- for _, lp := range desc.constLabelPairs {
- constLabels[*lp.Name] = *lp.Value
- }
- for ln, lv := range labels {
- if _, alreadyUsed := constLabels[ln]; alreadyUsed {
- return &Desc{
- fqName: desc.fqName,
- help: desc.help,
- variableLabels: desc.variableLabels,
- constLabelPairs: desc.constLabelPairs,
- err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
- }
- }
- constLabels[ln] = lv
- }
- // NewDesc will do remaining validations.
- newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
-	// Propagate errors if there were any. This will override any error
-	// created by NewDesc above, i.e. earlier errors get precedence.
- if desc.err != nil {
- newDesc.err = desc.err
- }
- return newDesc
-}
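A short usage sketch for the two wrappers documented above; the registry, label, and prefix values are illustrative:

package example

import "github.com/prometheus/client_golang/prometheus"

func wrapSketch() {
	reg := prometheus.NewRegistry()

	// Every metric registered through subReg gets the const label
	// subsystem="queue" added on collection.
	subReg := prometheus.WrapRegistererWith(prometheus.Labels{"subsystem": "queue"}, reg)

	// Every metric registered through prefReg has its name prefixed.
	prefReg := prometheus.WrapRegistererWithPrefix("myapp_", reg)

	subReg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_total",
		Help: "Jobs handled by the queue subsystem.",
	}))
	prefReg.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "build_info",
		Help: "Build information.",
	}))
}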
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
deleted file mode 100644
index 20110e410e..0000000000
--- a/vendor/github.com/prometheus/client_model/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Data model artifacts for Prometheus.
-Copyright 2012-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
deleted file mode 100644
index 2f15490758..0000000000
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ /dev/null
@@ -1,1399 +0,0 @@
-// Copyright 2013 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.30.0
-// protoc v3.20.3
-// source: io/prometheus/client/metrics.proto
-
-package io_prometheus_client
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type MetricType int32
-
-const (
- // COUNTER must use the Metric field "counter".
- MetricType_COUNTER MetricType = 0
- // GAUGE must use the Metric field "gauge".
- MetricType_GAUGE MetricType = 1
- // SUMMARY must use the Metric field "summary".
- MetricType_SUMMARY MetricType = 2
- // UNTYPED must use the Metric field "untyped".
- MetricType_UNTYPED MetricType = 3
- // HISTOGRAM must use the Metric field "histogram".
- MetricType_HISTOGRAM MetricType = 4
- // GAUGE_HISTOGRAM must use the Metric field "histogram".
- MetricType_GAUGE_HISTOGRAM MetricType = 5
-)
-
-// Enum value maps for MetricType.
-var (
- MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
- 5: "GAUGE_HISTOGRAM",
- }
- MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
- "GAUGE_HISTOGRAM": 5,
- }
-)
-
-func (x MetricType) Enum() *MetricType {
- p := new(MetricType)
- *p = x
- return p
-}
-
-func (x MetricType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (MetricType) Descriptor() protoreflect.EnumDescriptor {
- return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
-}
-
-func (MetricType) Type() protoreflect.EnumType {
- return &file_io_prometheus_client_metrics_proto_enumTypes[0]
-}
-
-func (x MetricType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *MetricType) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = MetricType(num)
- return nil
-}
-
-// Deprecated: Use MetricType.Descriptor instead.
-func (MetricType) EnumDescriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
-}
-
-type LabelPair struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (x *LabelPair) Reset() {
- *x = LabelPair{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *LabelPair) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*LabelPair) ProtoMessage() {}
-
-func (x *LabelPair) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *LabelPair) GetName() string {
- if x != nil && x.Name != nil {
- return *x.Name
- }
- return ""
-}
-
-func (x *LabelPair) GetValue() string {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return ""
-}
-
-type Gauge struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-}
-
-func (x *Gauge) Reset() {
- *x = Gauge{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Gauge) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Gauge) ProtoMessage() {}
-
-func (x *Gauge) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
-func (*Gauge) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Gauge) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-type Counter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
-}
-
-func (x *Counter) Reset() {
- *x = Counter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Counter) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Counter) ProtoMessage() {}
-
-func (x *Counter) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
-func (*Counter) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Counter) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-func (x *Counter) GetExemplar() *Exemplar {
- if x != nil {
- return x.Exemplar
- }
- return nil
-}
-
-func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.CreatedTimestamp
- }
- return nil
-}
-
-type Quantile struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (x *Quantile) Reset() {
- *x = Quantile{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Quantile) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Quantile) ProtoMessage() {}
-
-func (x *Quantile) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
-func (*Quantile) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *Quantile) GetQuantile() float64 {
- if x != nil && x.Quantile != nil {
- return *x.Quantile
- }
- return 0
-}
-
-func (x *Quantile) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-type Summary struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
-}
-
-func (x *Summary) Reset() {
- *x = Summary{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Summary) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Summary) ProtoMessage() {}
-
-func (x *Summary) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
-func (*Summary) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *Summary) GetSampleCount() uint64 {
- if x != nil && x.SampleCount != nil {
- return *x.SampleCount
- }
- return 0
-}
-
-func (x *Summary) GetSampleSum() float64 {
- if x != nil && x.SampleSum != nil {
- return *x.SampleSum
- }
- return 0
-}
-
-func (x *Summary) GetQuantile() []*Quantile {
- if x != nil {
- return x.Quantile
- }
- return nil
-}
-
-func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.CreatedTimestamp
- }
- return nil
-}
-
-type Untyped struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
-}
-
-func (x *Untyped) Reset() {
- *x = Untyped{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Untyped) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Untyped) ProtoMessage() {}
-
-func (x *Untyped) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
-func (*Untyped) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *Untyped) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-type Histogram struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- // Buckets for the conventional histogram.
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
- CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
- // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
- // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
- // then each power of two is divided into 2^n logarithmic buckets.
- // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
- // In the future, more bucket schemas may be added using numbers < -4 or > 8.
- Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
- ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
- ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
-	ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides zero_count if > 0.
- // Negative buckets for the native histogram.
- NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
- // Use either "negative_delta" or "negative_count", the former for
- // regular histograms with integer counts, the latter for float
- // histograms.
- NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
- NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
- // Positive buckets for the native histogram.
- // Use a no-op span (offset 0, length 0) for a native histogram without any
- // observations yet and with a zero_threshold of 0. Otherwise, it would be
- // indistinguishable from a classic histogram.
- PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
- // Use either "positive_delta" or "positive_count", the former for
- // regular histograms with integer counts, the latter for float
- // histograms.
- PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
- PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
- // Only used for native histograms. These exemplars MUST have a timestamp.
- Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
-}
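The schema rule in the comment above is easiest to see numerically. A small sketch under that stated formula, i.e. the upper bound of native-histogram bucket i at schema n is 2^(i·2^-n):

package example

import (
	"fmt"
	"math"
)

// nativeBucketUpperBound returns the upper bound of the bucket with the given
// index: each boundary is the previous one times 2^(2^-schema).
func nativeBucketUpperBound(schema, index int) float64 {
	return math.Exp2(float64(index) / math.Exp2(float64(schema)))
}

func schemaSketch() {
	// At schema 3 there are 2^3 = 8 buckets per power of two, so the growth
	// factor per bucket is 2^(1/8) ≈ 1.0905.
	for i := 1; i <= 3; i++ {
		fmt.Println(nativeBucketUpperBound(3, i)) // ≈ 1.0905, 1.1892, 1.2968
	}
}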
-
-func (x *Histogram) Reset() {
- *x = Histogram{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Histogram) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Histogram) ProtoMessage() {}
-
-func (x *Histogram) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
-func (*Histogram) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *Histogram) GetSampleCount() uint64 {
- if x != nil && x.SampleCount != nil {
- return *x.SampleCount
- }
- return 0
-}
-
-func (x *Histogram) GetSampleCountFloat() float64 {
- if x != nil && x.SampleCountFloat != nil {
- return *x.SampleCountFloat
- }
- return 0
-}
-
-func (x *Histogram) GetSampleSum() float64 {
- if x != nil && x.SampleSum != nil {
- return *x.SampleSum
- }
- return 0
-}
-
-func (x *Histogram) GetBucket() []*Bucket {
- if x != nil {
- return x.Bucket
- }
- return nil
-}
-
-func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.CreatedTimestamp
- }
- return nil
-}
-
-func (x *Histogram) GetSchema() int32 {
- if x != nil && x.Schema != nil {
- return *x.Schema
- }
- return 0
-}
-
-func (x *Histogram) GetZeroThreshold() float64 {
- if x != nil && x.ZeroThreshold != nil {
- return *x.ZeroThreshold
- }
- return 0
-}
-
-func (x *Histogram) GetZeroCount() uint64 {
- if x != nil && x.ZeroCount != nil {
- return *x.ZeroCount
- }
- return 0
-}
-
-func (x *Histogram) GetZeroCountFloat() float64 {
- if x != nil && x.ZeroCountFloat != nil {
- return *x.ZeroCountFloat
- }
- return 0
-}
-
-func (x *Histogram) GetNegativeSpan() []*BucketSpan {
- if x != nil {
- return x.NegativeSpan
- }
- return nil
-}
-
-func (x *Histogram) GetNegativeDelta() []int64 {
- if x != nil {
- return x.NegativeDelta
- }
- return nil
-}
-
-func (x *Histogram) GetNegativeCount() []float64 {
- if x != nil {
- return x.NegativeCount
- }
- return nil
-}
-
-func (x *Histogram) GetPositiveSpan() []*BucketSpan {
- if x != nil {
- return x.PositiveSpan
- }
- return nil
-}
-
-func (x *Histogram) GetPositiveDelta() []int64 {
- if x != nil {
- return x.PositiveDelta
- }
- return nil
-}
-
-func (x *Histogram) GetPositiveCount() []float64 {
- if x != nil {
- return x.PositiveCount
- }
- return nil
-}
-
-func (x *Histogram) GetExemplars() []*Exemplar {
- if x != nil {
- return x.Exemplars
- }
- return nil
-}
-
-// A Bucket of a conventional histogram, each of which is treated as
-// an individual counter-like time series by Prometheus.
-type Bucket struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
- CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
- Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
-}
-
-func (x *Bucket) Reset() {
- *x = Bucket{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Bucket) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Bucket) ProtoMessage() {}
-
-func (x *Bucket) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
-func (*Bucket) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *Bucket) GetCumulativeCount() uint64 {
- if x != nil && x.CumulativeCount != nil {
- return *x.CumulativeCount
- }
- return 0
-}
-
-func (x *Bucket) GetCumulativeCountFloat() float64 {
- if x != nil && x.CumulativeCountFloat != nil {
- return *x.CumulativeCountFloat
- }
- return 0
-}
-
-func (x *Bucket) GetUpperBound() float64 {
- if x != nil && x.UpperBound != nil {
- return *x.UpperBound
- }
- return 0
-}
-
-func (x *Bucket) GetExemplar() *Exemplar {
- if x != nil {
- return x.Exemplar
- }
- return nil
-}
-
-// A BucketSpan defines a number of consecutive buckets in a native
-// histogram with their offset. Logically, it would be more
-// straightforward to include the bucket counts in the Span. However,
-// the protobuf representation is more compact in the way the data is
-// structured here (with all the buckets in a single array separate
-// from the Spans).
-type BucketSpan struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
- Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
-}
-
-func (x *BucketSpan) Reset() {
- *x = BucketSpan{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BucketSpan) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BucketSpan) ProtoMessage() {}
-
-func (x *BucketSpan) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
-func (*BucketSpan) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *BucketSpan) GetOffset() int32 {
- if x != nil && x.Offset != nil {
- return *x.Offset
- }
- return 0
-}
-
-func (x *BucketSpan) GetLength() uint32 {
- if x != nil && x.Length != nil {
- return *x.Length
- }
- return 0
-}
-
-type Exemplar struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
-}
-
-func (x *Exemplar) Reset() {
- *x = Exemplar{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Exemplar) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Exemplar) ProtoMessage() {}
-
-func (x *Exemplar) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *Exemplar) GetLabel() []*LabelPair {
- if x != nil {
- return x.Label
- }
- return nil
-}
-
-func (x *Exemplar) GetValue() float64 {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return 0
-}
-
-func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.Timestamp
- }
- return nil
-}
-
-type Metric struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
-}
-
-func (x *Metric) Reset() {
- *x = Metric{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Metric) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Metric) ProtoMessage() {}
-
-func (x *Metric) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
-func (*Metric) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *Metric) GetLabel() []*LabelPair {
- if x != nil {
- return x.Label
- }
- return nil
-}
-
-func (x *Metric) GetGauge() *Gauge {
- if x != nil {
- return x.Gauge
- }
- return nil
-}
-
-func (x *Metric) GetCounter() *Counter {
- if x != nil {
- return x.Counter
- }
- return nil
-}
-
-func (x *Metric) GetSummary() *Summary {
- if x != nil {
- return x.Summary
- }
- return nil
-}
-
-func (x *Metric) GetUntyped() *Untyped {
- if x != nil {
- return x.Untyped
- }
- return nil
-}
-
-func (x *Metric) GetHistogram() *Histogram {
- if x != nil {
- return x.Histogram
- }
- return nil
-}
-
-func (x *Metric) GetTimestampMs() int64 {
- if x != nil && x.TimestampMs != nil {
- return *x.TimestampMs
- }
- return 0
-}
-
-type MetricFamily struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
-}
-
-func (x *MetricFamily) Reset() {
- *x = MetricFamily{}
- if protoimpl.UnsafeEnabled {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MetricFamily) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MetricFamily) ProtoMessage() {}
-
-func (x *MetricFamily) ProtoReflect() protoreflect.Message {
- mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
-func (*MetricFamily) Descriptor() ([]byte, []int) {
- return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *MetricFamily) GetName() string {
- if x != nil && x.Name != nil {
- return *x.Name
- }
- return ""
-}
-
-func (x *MetricFamily) GetHelp() string {
- if x != nil && x.Help != nil {
- return *x.Help
- }
- return ""
-}
-
-func (x *MetricFamily) GetType() MetricType {
- if x != nil && x.Type != nil {
- return *x.Type
- }
- return MetricType_COUNTER
-}
-
-func (x *MetricFamily) GetMetric() []*Metric {
- if x != nil {
- return x.Metric
- }
- return nil
-}
-
-func (x *MetricFamily) GetUnit() string {
- if x != nil && x.Unit != nil {
- return *x.Unit
- }
- return ""
-}
-
-var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
-
-var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
- 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
- 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
- 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
- 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
- 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
- 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
- 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
- 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
- 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
- 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
- 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
- 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
- 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
- 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
- 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
- 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
- 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
- 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
- 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
- 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
- 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
- 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
- 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
- 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
- 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
- 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
- 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
- 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
- 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
- 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
- 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
- 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
- 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
- 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
- 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
- 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
- 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
- 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
- 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
- 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
- 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
- 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
- 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
- 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
- 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
- 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
- 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
- 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
- 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
- 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
- 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
- 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
- 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
- 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
- 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
- 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
- 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
- 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
- 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
- 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
- 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
- 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
- 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
- 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
- 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
- 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
- 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
- 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
- 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
- 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
- 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
- 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
- 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
- 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
- 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
- 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
- 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
- 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
- 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
- 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
- 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
- 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
- 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
- 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
- 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
- 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
- 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
- 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
- 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
- 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
- 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
- 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
- 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
- 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
- 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
- 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
- 0x6c, 0x69, 0x65, 0x6e, 0x74,
-}
-
-var (
- file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
- file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
-)
-
-func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
- file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
- file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
- })
- return file_io_prometheus_client_metrics_proto_rawDescData
-}
-
-var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
-var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
- (MetricType)(0), // 0: io.prometheus.client.MetricType
- (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
- (*Gauge)(nil), // 2: io.prometheus.client.Gauge
- (*Counter)(nil), // 3: io.prometheus.client.Counter
- (*Quantile)(nil), // 4: io.prometheus.client.Quantile
- (*Summary)(nil), // 5: io.prometheus.client.Summary
- (*Untyped)(nil), // 6: io.prometheus.client.Untyped
- (*Histogram)(nil), // 7: io.prometheus.client.Histogram
- (*Bucket)(nil), // 8: io.prometheus.client.Bucket
- (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
- (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
- (*Metric)(nil), // 11: io.prometheus.client.Metric
- (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
- (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
-}
-var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
- 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
- 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
- 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
- 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
- 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
- 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
- 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
- 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
- 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
- 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
- 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
- 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
- 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
- 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
- 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
- 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
- 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
- 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
- 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
- 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
- 20, // [20:20] is the sub-list for method output_type
- 20, // [20:20] is the sub-list for method input_type
- 20, // [20:20] is the sub-list for extension type_name
- 20, // [20:20] is the sub-list for extension extendee
- 0, // [0:20] is the sub-list for field type_name
-}
-
-func init() { file_io_prometheus_client_metrics_proto_init() }
-func file_io_prometheus_client_metrics_proto_init() {
- if File_io_prometheus_client_metrics_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LabelPair); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Gauge); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Counter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Quantile); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Summary); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Untyped); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Histogram); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BucketSpan); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Exemplar); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Metric); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MetricFamily); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 12,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
- DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
- EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
- MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
- }.Build()
- File_io_prometheus_client_metrics_proto = out.File
- file_io_prometheus_client_metrics_proto_rawDesc = nil
- file_io_prometheus_client_metrics_proto_goTypes = nil
- file_io_prometheus_client_metrics_proto_depIdxs = nil
-}
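For reference, a minimal sketch (not part of the vendored files, assuming only
the public client_model and protobuf-go packages) of how the nil-safe getters
generated above are consumed:

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Optional scalar fields are pointers; proto.String/Enum build them.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
	}
	fmt.Println(mf.GetName()) // "http_requests_total"
	fmt.Println(mf.GetHelp()) // empty string; Help was never set

	// Getters are safe even on a nil receiver and return the zero value
	// (the first enum value for MetricType).
	var missing *dto.MetricFamily
	fmt.Println(missing.GetType()) // COUNTER
}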
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
deleted file mode 100644
index 636a2c1a5e..0000000000
--- a/vendor/github.com/prometheus/common/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Common libraries shared by Prometheus Go components.
-Copyright 2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
deleted file mode 100644
index 25cfaa2164..0000000000
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "fmt"
- "io"
- "math"
- "mime"
- "net/http"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/encoding/protodelim"
-
- "github.com/prometheus/common/model"
-)
-
-// Decoder types decode an input stream into metric families.
-type Decoder interface {
- Decode(*dto.MetricFamily) error
-}
-
-// DecodeOptions contains options used by the Decoder and in sample extraction.
-type DecodeOptions struct {
- // Timestamp is added to each value from the stream that has no explicit timestamp set.
- Timestamp model.Time
-}
-
-// ResponseFormat extracts the correct format from an HTTP response header.
-// If no matching format can be found, FormatUnknown is returned.
-func ResponseFormat(h http.Header) Format {
- ct := h.Get(hdrContentType)
-
- mediatype, params, err := mime.ParseMediaType(ct)
- if err != nil {
- return fmtUnknown
- }
-
- const textType = "text/plain"
-
- switch mediatype {
- case ProtoType:
- if p, ok := params["proto"]; ok && p != ProtoProtocol {
- return fmtUnknown
- }
- if e, ok := params["encoding"]; ok && e != "delimited" {
- return fmtUnknown
- }
- return fmtProtoDelim
-
- case textType:
- if v, ok := params["version"]; ok && v != TextVersion {
- return fmtUnknown
- }
- return fmtText
- }
-
- return fmtUnknown
-}
-
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
-func NewDecoder(r io.Reader, format Format) Decoder {
- switch format.FormatType() {
- case TypeProtoDelim:
- return &protoDecoder{r: bufio.NewReader(r)}
- }
- return &textDecoder{r: r}
-}
-
-// protoDecoder implements the Decoder interface for protocol buffers.
-type protoDecoder struct {
- r protodelim.Reader
-}
-
-// Decode implements the Decoder interface.
-func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
- opts := protodelim.UnmarshalOptions{
- MaxSize: -1,
- }
- if err := opts.UnmarshalFrom(d.r, v); err != nil {
- return err
- }
- if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
- return fmt.Errorf("invalid metric name %q", v.GetName())
- }
- for _, m := range v.GetMetric() {
- if m == nil {
- continue
- }
- for _, l := range m.GetLabel() {
- if l == nil {
- continue
- }
- if !model.LabelValue(l.GetValue()).IsValid() {
- return fmt.Errorf("invalid label value %q", l.GetValue())
- }
- if !model.LabelName(l.GetName()).IsValid() {
- return fmt.Errorf("invalid label name %q", l.GetName())
- }
- }
- }
- return nil
-}
-
-// textDecoder implements the Decoder interface for the text protocol.
-type textDecoder struct {
- r io.Reader
- fams map[string]*dto.MetricFamily
- err error
-}
-
-// Decode implements the Decoder interface.
-func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- if d.err == nil {
- // Read all metrics in one shot.
- var p TextParser
- d.fams, d.err = p.TextToMetricFamilies(d.r)
- // If we don't get an error, store io.EOF for the end.
- if d.err == nil {
- d.err = io.EOF
- }
- }
- // Pick off one MetricFamily per Decode until there's nothing left.
- for key, fam := range d.fams {
- v.Name = fam.Name
- v.Help = fam.Help
- v.Type = fam.Type
- v.Metric = fam.Metric
- delete(d.fams, key)
- return nil
- }
- return d.err
-}
-
-// SampleDecoder wraps a Decoder to extract samples from the metric families
-// decoded by the wrapped Decoder.
-type SampleDecoder struct {
- Dec Decoder
- Opts *DecodeOptions
-
- f dto.MetricFamily
-}
-
-// Decode calls the Decode method of the wrapped Decoder and then extracts the
-// samples from the decoded MetricFamily into the provided model.Vector.
-func (sd *SampleDecoder) Decode(s *model.Vector) error {
- err := sd.Dec.Decode(&sd.f)
- if err != nil {
- return err
- }
- *s, err = extractSamples(&sd.f, sd.Opts)
- return err
-}
-
-// ExtractSamples builds a slice of samples from the provided metric
-// families. If an error occurs during sample extraction, it continues to
-// extract from the remaining metric families. The returned error is the last
-// error that has occurred.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
- var (
- all model.Vector
- lastErr error
- )
- for _, f := range fams {
- some, err := extractSamples(f, o)
- if err != nil {
- lastErr = err
- continue
- }
- all = append(all, some...)
- }
- return all, lastErr
-}
-
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
- switch f.GetType() {
- case dto.MetricType_COUNTER:
- return extractCounter(o, f), nil
- case dto.MetricType_GAUGE:
- return extractGauge(o, f), nil
- case dto.MetricType_SUMMARY:
- return extractSummary(o, f), nil
- case dto.MetricType_UNTYPED:
- return extractUntyped(o, f), nil
- case dto.MetricType_HISTOGRAM:
- return extractHistogram(o, f), nil
- }
- return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
-}
-
-func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Counter == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Counter.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Gauge == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Gauge.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Untyped == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Untyped.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Summary == nil {
- continue
- }
-
- timestamp := o.Timestamp
- if m.TimestampMs != nil {
- timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- }
-
- for _, q := range m.Summary.Quantile {
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- // BUG(matt): Update other names to "quantile".
- lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(q.GetValue()),
- Timestamp: timestamp,
- })
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Summary.GetSampleSum()),
- Timestamp: timestamp,
- })
-
- lset = make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Summary.GetSampleCount()),
- Timestamp: timestamp,
- })
- }
-
- return samples
-}
-
-func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Histogram == nil {
- continue
- }
-
- timestamp := o.Timestamp
- if m.TimestampMs != nil {
- timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- }
-
- infSeen := false
-
- for _, q := range m.Histogram.Bucket {
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
- if math.IsInf(q.GetUpperBound(), +1) {
- infSeen = true
- }
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(q.GetCumulativeCount()),
- Timestamp: timestamp,
- })
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Histogram.GetSampleSum()),
- Timestamp: timestamp,
- })
-
- lset = make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
- count := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Histogram.GetSampleCount()),
- Timestamp: timestamp,
- }
- samples = append(samples, count)
-
- if !infSeen {
- // Append an infinity bucket sample.
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: count.Value,
- Timestamp: timestamp,
- })
- }
- }
-
- return samples
-}
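A minimal sketch (not part of the vendored files) of driving the decoder
deleted above; each Decode call yields one MetricFamily until io.EOF:

package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := strings.NewReader("# TYPE http_requests_total counter\nhttp_requests_total{code=\"200\"} 42\n")
	dec := expfmt.NewDecoder(in, expfmt.NewFormat(expfmt.TypeTextPlain))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err != io.EOF {
				fmt.Println("decode error:", err)
			}
			break
		}
		fmt.Println(mf.GetName(), len(mf.GetMetric()))
	}
}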
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
deleted file mode 100644
index 7f6cbe7d29..0000000000
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "fmt"
- "io"
- "net/http"
-
- "google.golang.org/protobuf/encoding/protodelim"
- "google.golang.org/protobuf/encoding/prototext"
-
- "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Encoder types encode metric families into an underlying wire protocol.
-type Encoder interface {
- Encode(*dto.MetricFamily) error
-}
-
-// Closer is implemented by Encoders that need to be closed to finalize
-// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
-//
-// Note that all Encoder implementations returned from this package implement
-// Closer, too, even if the Close call is a no-op. This happens in preparation
-// for adding a Close method to the Encoder interface directly in a (mildly
-// breaking) release in the future.
-type Closer interface {
- Close() error
-}
-
-type encoderCloser struct {
- encode func(*dto.MetricFamily) error
- close func() error
-}
-
-func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
- return ec.encode(v)
-}
-
-func (ec encoderCloser) Close() error {
- return ec.close()
-}
-
-// Negotiate returns the Content-Type based on the given Accept header. If no
-// appropriate accepted type is found, FmtText is returned (which is the
-// Prometheus text format). This function will never negotiate FmtOpenMetrics,
-// as the support is still experimental. To include the option to negotiate
-// FmtOpenMetrics, use NegotiateOpenMetrics.
-func Negotiate(h http.Header) Format {
- escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
- for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
- if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
- switch Format(escapeParam) {
- case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
- escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
- default:
- // If the escaping parameter is unknown, ignore it.
- }
- }
- ver := ac.Params["version"]
- if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
- switch ac.Params["encoding"] {
- case "delimited":
- return fmtProtoDelim + escapingScheme
- case "text":
- return fmtProtoText + escapingScheme
- case "compact-text":
- return fmtProtoCompact + escapingScheme
- }
- }
- if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return fmtText + escapingScheme
- }
- }
- return fmtText + escapingScheme
-}
-
-// NegotiateIncludingOpenMetrics works like Negotiate but includes
-// FmtOpenMetrics as an option for the result. Note that this function is
-// temporary and will disappear once FmtOpenMetrics is fully supported and as
-// such may be negotiated by the normal Negotiate function.
-func NegotiateIncludingOpenMetrics(h http.Header) Format {
- escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
- for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
- if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
- switch Format(escapeParam) {
- case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
- escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
- default:
- // If the escaping parameter is unknown, ignore it.
- }
- }
- ver := ac.Params["version"]
- if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
- switch ac.Params["encoding"] {
- case "delimited":
- return fmtProtoDelim + escapingScheme
- case "text":
- return fmtProtoText + escapingScheme
- case "compact-text":
- return fmtProtoCompact + escapingScheme
- }
- }
- if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return fmtText + escapingScheme
- }
- if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
- switch ver {
- case OpenMetricsVersion_1_0_0:
- return fmtOpenMetrics_1_0_0 + escapingScheme
- default:
- return fmtOpenMetrics_0_0_1 + escapingScheme
- }
- }
- }
- return fmtText + escapingScheme
-}
-
-// NewEncoder returns a new encoder based on content type negotiation. All
-// Encoder implementations returned by NewEncoder also implement Closer, and
-// callers should always call the Close method. It is currently only required
-// for FmtOpenMetrics, but a future (breaking) release will add the Close method
-// to the Encoder interface directly. The current version of the Encoder
-// interface is kept for backwards compatibility.
-// In cases where the Format does not allow for UTF-8 names, the global
-// NameEscapingScheme will be applied.
-//
-// NewEncoder can be called with additional options to customize the OpenMetrics text output.
-// For example:
-// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
-//
-// Extra options are ignored for all other formats.
-func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
- escapingScheme := format.ToEscapingScheme()
-
- switch format.FormatType() {
- case TypeProtoDelim:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := protodelim.MarshalTo(w, v)
- return err
- },
- close: func() error { return nil },
- }
- case TypeProtoCompact:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
- return err
- },
- close: func() error { return nil },
- }
- case TypeProtoText:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
- return err
- },
- close: func() error { return nil },
- }
- case TypeTextPlain:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
- return err
- },
- close: func() error { return nil },
- }
- case TypeOpenMetrics:
- return encoderCloser{
- encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
- return err
- },
- close: func() error {
- _, err := FinalizeOpenMetrics(w)
- return err
- },
- }
- }
- panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
-}
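A minimal sketch (not part of the vendored files) of the negotiate-then-encode
flow implemented above: pick a Format from the Accept header, encode one
family, and always Close, which is a no-op for every format except OpenMetrics:

package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	format := expfmt.Negotiate(r.Header)
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format)
	mf := &dto.MetricFamily{
		Name:   proto.String("up"),
		Type:   dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{Gauge: &dto.Gauge{Value: proto.Float64(1)}}},
	}
	if err := enc.Encode(mf); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if closer, ok := enc.(expfmt.Closer); ok {
		closer.Close() // finalizes OpenMetrics output with a trailing "# EOF"
	}
}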
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
deleted file mode 100644
index 051b38cd17..0000000000
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package expfmt contains tools for reading and writing Prometheus metrics.
-package expfmt
-
-import (
- "fmt"
- "strings"
-
- "github.com/prometheus/common/model"
-)
-
-// Format specifies the HTTP content type of the different wire protocols.
-type Format string
-
-// Constants to assemble the Content-Type values for the different wire
-// protocols. The Content-Type strings here are all for the legacy exposition
-// formats, where valid characters for metric names and label names are limited.
-// Support for arbitrary UTF-8 characters in those names is already partially
-// implemented in this module (see model.ValidationScheme), but to actually use
-// it on the wire, new content-type strings will have to be agreed upon and
-// added here.
-const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
- OpenMetricsVersion_0_0_1 = "0.0.1"
- OpenMetricsVersion_1_0_0 = "1.0.0"
-
- // The Content-Type values for the different wire protocols. Note that these
- // values are now unexported. If code was relying on comparisons to these
- // constants, instead use FormatType().
- fmtUnknown Format = ``
- fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- fmtProtoDelim Format = protoFmt + ` encoding=delimited`
- fmtProtoText Format = protoFmt + ` encoding=text`
- fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
- fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
- fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
-)
-
-const (
- hdrContentType = "Content-Type"
- hdrAccept = "Accept"
-)
-
-// FormatType is a Go enum representing the overall category for the given
-// Format. As the number of Format permutations increases, basic string
-// comparisons become infeasible, so this enum captures the most useful
-// high-level attribute of the Format string.
-type FormatType int
-
-const (
- TypeUnknown FormatType = iota
- TypeProtoCompact
- TypeProtoDelim
- TypeProtoText
- TypeTextPlain
- TypeOpenMetrics
-)
-
-// NewFormat generates a new Format from the type provided. It is mostly used
-// for tests; most Formats should be generated as part of content negotiation in
-// encode.go. If a type has more than one version, the latest version will be
-// returned.
-func NewFormat(t FormatType) Format {
- switch t {
- case TypeProtoCompact:
- return fmtProtoCompact
- case TypeProtoDelim:
- return fmtProtoDelim
- case TypeProtoText:
- return fmtProtoText
- case TypeTextPlain:
- return fmtText
- case TypeOpenMetrics:
- return fmtOpenMetrics_1_0_0
- default:
- return fmtUnknown
- }
-}
-
-// NewOpenMetricsFormat generates a new OpenMetrics format matching the
-// specified version number.
-func NewOpenMetricsFormat(version string) (Format, error) {
- if version == OpenMetricsVersion_0_0_1 {
- return fmtOpenMetrics_0_0_1, nil
- }
- if version == OpenMetricsVersion_1_0_0 {
- return fmtOpenMetrics_1_0_0, nil
- }
- return fmtUnknown, fmt.Errorf("unknown open metrics version string")
-}
-
-// FormatType deduces an overall FormatType for the given format.
-func (f Format) FormatType() FormatType {
- toks := strings.Split(string(f), ";")
- params := make(map[string]string)
- for i, t := range toks {
- if i == 0 {
- continue
- }
- args := strings.Split(t, "=")
- if len(args) != 2 {
- continue
- }
- params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
- }
-
- switch strings.TrimSpace(toks[0]) {
- case ProtoType:
- if params["proto"] != ProtoProtocol {
- return TypeUnknown
- }
- switch params["encoding"] {
- case "delimited":
- return TypeProtoDelim
- case "text":
- return TypeProtoText
- case "compact-text":
- return TypeProtoCompact
- default:
- return TypeUnknown
- }
- case OpenMetricsType:
- if params["charset"] != "utf-8" {
- return TypeUnknown
- }
- return TypeOpenMetrics
- case "text/plain":
- v, ok := params["version"]
- if !ok {
- return TypeTextPlain
- }
- if v == TextVersion {
- return TypeTextPlain
- }
- return TypeUnknown
- default:
- return TypeUnknown
- }
-}
-
-// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
-// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
-// "escaping" term exists, that will be used. Otherwise, the global default will
-// be returned.
-func (format Format) ToEscapingScheme() model.EscapingScheme {
- for _, p := range strings.Split(string(format), ";") {
- toks := strings.Split(p, "=")
- if len(toks) != 2 {
- continue
- }
- key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
- if key == model.EscapingKey {
- scheme, err := model.ToEscapingScheme(value)
- if err != nil {
- return model.NameEscapingScheme
- }
- return scheme
- }
- }
- return model.NameEscapingScheme
-}
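A minimal sketch (not part of the vendored files) exercising the Format
helpers defined above; FormatType collapses a full content-type string to its
high-level category, and ToEscapingScheme reads the escaping parameter:

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	f := expfmt.Format("text/plain; version=0.0.4; charset=utf-8")
	fmt.Println(f.FormatType() == expfmt.TypeTextPlain) // true

	// An unrecognized version downgrades the category to TypeUnknown.
	g := expfmt.Format("text/plain; version=9.9.9")
	fmt.Println(g.FormatType() == expfmt.TypeUnknown) // true

	// No escaping parameter is present, so the global default is returned.
	fmt.Println(f.ToEscapingScheme())
}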
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
deleted file mode 100644
index dfac962a4e..0000000000
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Build only when actually fuzzing
-//go:build gofuzz
-// +build gofuzz
-
-package expfmt
-
-import "bytes"
-
-// Fuzz the text metric parser with github.com/dvyukov/go-fuzz:
-//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
-//
-// Further input samples should go in the folder fuzz/corpus.
-func Fuzz(in []byte) int {
- parser := TextParser{}
- _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
-
- if err != nil {
- return 0
- }
-
- return 1
-}
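A hypothetical equivalent (not part of the vendored files) using Go's native
fuzzing from Go 1.18+ instead of go-fuzz; it would live inside the expfmt
package itself as a _test.go file:

package expfmt

import (
	"bytes"
	"testing"
)

func FuzzTextToMetricFamilies(f *testing.F) {
	f.Add([]byte("up 1\n"))
	f.Fuzz(func(t *testing.T, in []byte) {
		var p TextParser
		// The parser must not panic on arbitrary input; parse errors are fine.
		p.TextToMetricFamilies(bytes.NewReader(in))
	})
}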
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
deleted file mode 100644
index 353c5e93f9..0000000000
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ /dev/null
@@ -1,696 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
-
- "google.golang.org/protobuf/types/known/timestamppb"
-
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-type encoderOption struct {
- withCreatedLines bool
- withUnit bool
-}
-
-type EncoderOption func(*encoderOption)
-
-// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
-// to include _created lines (See
-// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
-// Created timestamps can improve the accuracy of series reset detection, but
-// come with a bandwidth cost.
-//
-// At the time of writing, created timestamp ingestion is still experimental in
-// Prometheus and needs to be enabled with the feature flag
-// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are
-// still possible. Therefore, it is recommended to use this feature with caution.
-func WithCreatedLines() EncoderOption {
- return func(t *encoderOption) {
- t.withCreatedLines = true
- }
-}
-
-// WithUnit is an EncoderOption that enables a set unit to be written to the
-// output and, if not already present, appended to the metric name as a
-// suffix. Without opting in this way, the unit is neither added to the metric
-// name nor passed on to the output, even if it is declared in the
-// *dto.MetricFamily struct, i.e. even if in.Unit != nil.
-func WithUnit() EncoderOption {
- return func(t *encoderOption) {
- t.withUnit = true
- }
-}
-
-// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
-// OpenMetrics text format and writes the resulting lines to 'out'. It returns
-// the number of bytes written and any error encountered. The output will have
-// the same order as the input, no further sorting is performed. Furthermore,
-// this function assumes the input is already sanitized and does not perform any
-// sanity checks. If the input contains duplicate metrics or invalid metric or
-// label names, the conversion will result in invalid text format output.
-//
-// If metric names conform to the legacy validation pattern, they will be placed
-// outside the brackets in the traditional way, like `foo{}`. If the metric name
-// fails the legacy validation check, it will be placed quoted inside the
-// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
-// no error will be returned in this case.
-//
-// Similar to metric names, if label names conform to the legacy validation
-// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
-// name fails the legacy validation check, it will be quoted:
-// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
-// no error will be returned in this case.
-//
-// This function fulfills the type 'expfmt.encoder'.
-//
-// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
-// on individual metric families, it is the responsibility of the caller to
-// append this line to 'out' once all metric families have been written.
-// Conveniently, this can be done by calling FinalizeOpenMetrics.
-//
-// The output should be fully OpenMetrics compliant. However, there are a few
-// missing features and peculiarities to avoid complications when switching from
-// Prometheus to OpenMetrics or vice versa:
-//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
-// lines. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
-//
-// - According to the OM specs, the `# UNIT` line is optional, but if populated,
-// the unit has to be present in the metric name as its suffix
-// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
-// However, to accommodate scenarios where such a change to the metric name is
-// not desirable, users are given a choice: either explicitly opt in, if they
-// want the unit to be included in the output AND appended to the metric name
-// as a suffix (see the description of the WithUnit function above), or do not
-// opt in, in which case neither happens.
-//
-// - No support for the following (optional) features: info type,
-// stateset type, gaugehistogram type.
-//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
-//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
- toOM := encoderOption{}
- for _, option := range options {
- option(&toOM)
- }
-
- name := in.GetName()
- if name == "" {
- return 0, fmt.Errorf("MetricFamily has no name: %s", in)
- }
-
- // Try the interface upgrade. If it doesn't work, we'll use a
- // bufio.Writer from the sync.Pool.
- w, ok := out.(enhancedWriter)
- if !ok {
- b := bufPool.Get().(*bufio.Writer)
- b.Reset(out)
- w = b
- defer func() {
- bErr := b.Flush()
- if err == nil {
- err = bErr
- }
- bufPool.Put(b)
- }()
- }
-
- var (
- n int
- metricType = in.GetType()
- compliantName = name
- )
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
- compliantName = name[:len(name)-6]
- }
- if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
- compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
- }
-
- // Comments, first HELP, then TYPE.
- if in.Help != nil {
- n, err = w.WriteString("# HELP ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, compliantName)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return
- }
- n, err = writeEscapedString(w, *in.Help, true)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return
- }
- }
- n, err = w.WriteString("# TYPE ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, compliantName)
- written += n
- if err != nil {
- return
- }
- switch metricType {
- case dto.MetricType_COUNTER:
- if strings.HasSuffix(name, "_total") {
- n, err = w.WriteString(" counter\n")
- } else {
- n, err = w.WriteString(" unknown\n")
- }
- case dto.MetricType_GAUGE:
- n, err = w.WriteString(" gauge\n")
- case dto.MetricType_SUMMARY:
- n, err = w.WriteString(" summary\n")
- case dto.MetricType_UNTYPED:
- n, err = w.WriteString(" unknown\n")
- case dto.MetricType_HISTOGRAM:
- n, err = w.WriteString(" histogram\n")
- default:
- return written, fmt.Errorf("unknown metric type %s", metricType.String())
- }
- written += n
- if err != nil {
- return
- }
- if toOM.withUnit && in.Unit != nil {
- n, err = w.WriteString("# UNIT ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, compliantName)
- written += n
- if err != nil {
- return
- }
-
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return
- }
- n, err = writeEscapedString(w, *in.Unit, true)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return
- }
- }
-
- var createdTsBytesWritten int
-
- // Finally the samples, one line for each.
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
- compliantName = compliantName + "_total"
- }
- for _, metric := range in.Metric {
- switch metricType {
- case dto.MetricType_COUNTER:
- if metric.Counter == nil {
- return written, fmt.Errorf(
- "expected counter in metric %s %s", compliantName, metric,
- )
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "", metric, "", 0,
- metric.Counter.GetValue(), 0, false,
- metric.Counter.Exemplar,
- )
- if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
- createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
- n += createdTsBytesWritten
- }
- case dto.MetricType_GAUGE:
- if metric.Gauge == nil {
- return written, fmt.Errorf(
- "expected gauge in metric %s %s", compliantName, metric,
- )
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "", metric, "", 0,
- metric.Gauge.GetValue(), 0, false,
- nil,
- )
- case dto.MetricType_UNTYPED:
- if metric.Untyped == nil {
- return written, fmt.Errorf(
- "expected untyped in metric %s %s", compliantName, metric,
- )
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "", metric, "", 0,
- metric.Untyped.GetValue(), 0, false,
- nil,
- )
- case dto.MetricType_SUMMARY:
- if metric.Summary == nil {
- return written, fmt.Errorf(
- "expected summary in metric %s %s", compliantName, metric,
- )
- }
- for _, q := range metric.Summary.Quantile {
- n, err = writeOpenMetricsSample(
- w, compliantName, "", metric,
- model.QuantileLabel, q.GetQuantile(),
- q.GetValue(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "_sum", metric, "", 0,
- metric.Summary.GetSampleSum(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "_count", metric, "", 0,
- 0, metric.Summary.GetSampleCount(), true,
- nil,
- )
- if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
- createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
- n += createdTsBytesWritten
- }
- case dto.MetricType_HISTOGRAM:
- if metric.Histogram == nil {
- return written, fmt.Errorf(
- "expected histogram in metric %s %s", compliantName, metric,
- )
- }
- infSeen := false
- for _, b := range metric.Histogram.Bucket {
- n, err = writeOpenMetricsSample(
- w, compliantName, "_bucket", metric,
- model.BucketLabel, b.GetUpperBound(),
- 0, b.GetCumulativeCount(), true,
- b.Exemplar,
- )
- written += n
- if err != nil {
- return
- }
- if math.IsInf(b.GetUpperBound(), +1) {
- infSeen = true
- }
- }
- if !infSeen {
- n, err = writeOpenMetricsSample(
- w, compliantName, "_bucket", metric,
- model.BucketLabel, math.Inf(+1),
- 0, metric.Histogram.GetSampleCount(), true,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "_sum", metric, "", 0,
- metric.Histogram.GetSampleSum(), 0, false,
- nil,
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeOpenMetricsSample(
- w, compliantName, "_count", metric, "", 0,
- 0, metric.Histogram.GetSampleCount(), true,
- nil,
- )
- if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
- createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
- n += createdTsBytesWritten
- }
- default:
- return written, fmt.Errorf(
- "unexpected type in metric %s %s", compliantName, metric,
- )
- }
- written += n
- if err != nil {
- return
- }
- }
- return
-}
-
-// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
-func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
- return w.Write([]byte("# EOF\n"))
-}
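
Taken together, the intended call pattern is one MetricFamilyToOpenMetrics call per family followed by a single FinalizeOpenMetrics call (WithCreatedLines and WithUnit would be passed as extra arguments). A hypothetical end-to-end sketch; the expected output follows directly from the code above:

    package main

    import (
        "os"

        dto "github.com/prometheus/client_model/go"
        "github.com/prometheus/common/expfmt"
        "google.golang.org/protobuf/proto"
    )

    func main() {
        mf := &dto.MetricFamily{
            Name: proto.String("http_requests_total"),
            Type: dto.MetricType_COUNTER.Enum(),
            Metric: []*dto.Metric{{
                Counter: &dto.Counter{Value: proto.Float64(42)},
            }},
        }
        if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
            panic(err)
        }
        if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
            panic(err)
        }
        // Prints:
        // # TYPE http_requests counter
        // http_requests_total 42.0
        // # EOF
    }
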
-
-// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
-// w, given the metric name, the metric proto message itself, optionally an
-// additional label name with a float64 value (use empty string as label name if
-// not required), the value (either as a float64 or as a uint64, determined by
-// useIntValue), and optionally an exemplar (use nil if not required). The
-// function returns the number of bytes written and any error encountered.
-func writeOpenMetricsSample(
- w enhancedWriter,
- name, suffix string,
- metric *dto.Metric,
- additionalLabelName string, additionalLabelValue float64,
- floatValue float64, intValue uint64, useIntValue bool,
- exemplar *dto.Exemplar,
-) (int, error) {
- written := 0
- n, err := writeOpenMetricsNameAndLabelPairs(
- w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
- )
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- if useIntValue {
- n, err = writeUint(w, intValue)
- } else {
- n, err = writeOpenMetricsFloat(w, floatValue)
- }
- written += n
- if err != nil {
- return written, err
- }
- if metric.TimestampMs != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- // TODO(beorn7): Format this directly without converting to a float first.
- n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
- written += n
- if err != nil {
- return written, err
- }
- }
- if exemplar != nil && len(exemplar.Label) > 0 {
- n, err = writeExemplar(w, exemplar)
- written += n
- if err != nil {
- return written, err
- }
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeOpenMetricsNameAndLabelPairs works like writeNameAndLabelPairs (in
-// text_create.go) but formats the additional label value in OpenMetrics
-// float style.
-func writeOpenMetricsNameAndLabelPairs(
- w enhancedWriter,
- name string,
- in []*dto.LabelPair,
- additionalLabelName string, additionalLabelValue float64,
-) (int, error) {
- var (
- written int
- separator byte = '{'
- metricInsideBraces = false
- )
-
- if name != "" {
- // If the name does not pass the legacy validity check, we must put the
- // metric name inside the braces, quoted.
- if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
- metricInsideBraces = true
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
-
- n, err := writeName(w, name)
- written += n
- if err != nil {
- return written, err
- }
- }
-
- if len(in) == 0 && additionalLabelName == "" {
- if metricInsideBraces {
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- }
- return written, nil
- }
-
- for _, lp := range in {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := writeName(w, lp.GetName())
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeEscapedString(w, lp.GetValue(), true)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
- if additionalLabelName != "" {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := w.WriteString(additionalLabelName)
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsFloat(w, additionalLabelValue)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- }
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeOpenMetricsCreated writes the created timestamp for a single time series
-// in OpenMetrics text format to w, given the metric name, the metric proto
-// message itself, optionally a suffix to be trimmed (e.g. '_total' for counters),
-// an additional label name with a float64 value (use the empty string as label
-// name if not required), and the created timestamp itself. The function returns
-// the number of bytes written and any error encountered.
-func writeOpenMetricsCreated(w enhancedWriter,
- name, suffixToTrim string, metric *dto.Metric,
- additionalLabelName string, additionalLabelValue float64,
- createdTimestamp *timestamppb.Timestamp,
-) (int, error) {
- written := 0
- n, err := writeOpenMetricsNameAndLabelPairs(
- w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
- )
- written += n
- if err != nil {
- return written, err
- }
-
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
-
- // TODO(beorn7): Format this directly from components of ts to
- // avoid overflow/underflow and precision issues of the float
- // conversion.
- n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
- written += n
- if err != nil {
- return written, err
- }
-
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
-// function returns the number of bytes written and any error encountered.
-func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
- written := 0
- n, err := w.WriteString(" # ")
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeOpenMetricsFloat(w, e.GetValue())
- written += n
- if err != nil {
- return written, err
- }
- if e.Timestamp != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- err = e.Timestamp.CheckValid()
- if err != nil {
- return written, err
- }
- ts := e.Timestamp.AsTime()
- // TODO(beorn7): Format this directly from components of ts to
- // avoid overflow/underflow and precision issues of the float
- // conversion.
- n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
- written += n
- if err != nil {
- return written, err
- }
- }
- return written, nil
-}
-
-// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
-// number would otherwise contain neither a "." nor an "e".
-func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
- switch {
- case f == 1:
- return w.WriteString("1.0")
- case f == 0:
- return w.WriteString("0.0")
- case f == -1:
- return w.WriteString("-1.0")
- case math.IsNaN(f):
- return w.WriteString("NaN")
- case math.IsInf(f, +1):
- return w.WriteString("+Inf")
- case math.IsInf(f, -1):
- return w.WriteString("-Inf")
- default:
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
- if !bytes.ContainsAny(*bp, "e.") {
- *bp = append(*bp, '.', '0')
- }
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
- }
-}
-
-// writeUint is like writeInt, but for uint64.
-func writeUint(w enhancedWriter, u uint64) (int, error) {
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendUint((*bp)[:0], u, 10)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
-}
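
The float rule above can be stated independently of the writer plumbing: format with strconv, then force a trailing ".0" whenever the result looks like an integer, so that OpenMetrics consumers always see a float. A standalone sketch with a hypothetical helper name:

    package main

    import (
        "fmt"
        "math"
        "strconv"
        "strings"
    )

    func openMetricsFloat(f float64) string {
        switch {
        case math.IsNaN(f):
            return "NaN"
        case math.IsInf(f, +1):
            return "+Inf"
        case math.IsInf(f, -1):
            return "-Inf"
        }
        s := strconv.FormatFloat(f, 'g', -1, 64)
        if !strings.ContainsAny(s, "e.") {
            s += ".0" // 1 -> "1.0"; 2.5 and 1e+100 already look float-like
        }
        return s
    }

    func main() {
        fmt.Println(openMetricsFloat(1))            // 1.0
        fmt.Println(openMetricsFloat(2.5))          // 2.5
        fmt.Println(openMetricsFloat(1e100))        // 1e+100
        fmt.Println(openMetricsFloat(math.Inf(-1))) // -Inf
    }
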
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
deleted file mode 100644
index f9b8265a9e..0000000000
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ /dev/null
@@ -1,520 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
- "sync"
-
- "github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
-// implements it.
-type enhancedWriter interface {
- io.Writer
- WriteRune(r rune) (n int, err error)
- WriteString(s string) (n int, err error)
- WriteByte(c byte) error
-}
-
-const (
- initialNumBufSize = 24
-)
-
-var (
- bufPool = sync.Pool{
- New: func() interface{} {
- return bufio.NewWriter(io.Discard)
- },
- }
- numBufPool = sync.Pool{
- New: func() interface{} {
- b := make([]byte, 0, initialNumBufSize)
- return &b
- },
- }
-)
-
-// MetricFamilyToText converts a MetricFamily proto message into text format and
-// writes the resulting lines to 'out'. It returns the number of bytes written
-// and any error encountered. The output will have the same order as the input,
-// no further sorting is performed. Furthermore, this function assumes the input
-// is already sanitized and does not perform any sanity checks. If the input
-// contains duplicate metrics or invalid metric or label names, the conversion
-// will result in invalid text format output.
-//
-// If metric names conform to the legacy validation pattern, they will be placed
-// outside the brackets in the traditional way, like `foo{}`. If the metric name
-// fails the legacy validation check, it will be placed quoted inside the
-// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
-// no error will be returned in this case.
-//
-// Similar to metric names, if label names conform to the legacy validation
-// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
-// name fails the legacy validation check, it will be quoted:
-// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
-// no error will be returned in this case.
-//
-// This method fulfills the type 'expfmt.encoder'.
-func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
- // Fail-fast checks.
- if len(in.Metric) == 0 {
- return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
- }
- name := in.GetName()
- if name == "" {
- return 0, fmt.Errorf("MetricFamily has no name: %s", in)
- }
-
- // Try the interface upgrade. If it doesn't work, we'll use a
- // bufio.Writer from the sync.Pool.
- w, ok := out.(enhancedWriter)
- if !ok {
- b := bufPool.Get().(*bufio.Writer)
- b.Reset(out)
- w = b
- defer func() {
- bErr := b.Flush()
- if err == nil {
- err = bErr
- }
- bufPool.Put(b)
- }()
- }
-
- var n int
-
- // Comments, first HELP, then TYPE.
- if in.Help != nil {
- n, err = w.WriteString("# HELP ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, name)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return
- }
- n, err = writeEscapedString(w, *in.Help, false)
- written += n
- if err != nil {
- return
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return
- }
- }
- n, err = w.WriteString("# TYPE ")
- written += n
- if err != nil {
- return
- }
- n, err = writeName(w, name)
- written += n
- if err != nil {
- return
- }
- metricType := in.GetType()
- switch metricType {
- case dto.MetricType_COUNTER:
- n, err = w.WriteString(" counter\n")
- case dto.MetricType_GAUGE:
- n, err = w.WriteString(" gauge\n")
- case dto.MetricType_SUMMARY:
- n, err = w.WriteString(" summary\n")
- case dto.MetricType_UNTYPED:
- n, err = w.WriteString(" untyped\n")
- case dto.MetricType_HISTOGRAM:
- n, err = w.WriteString(" histogram\n")
- default:
- return written, fmt.Errorf("unknown metric type %s", metricType.String())
- }
- written += n
- if err != nil {
- return
- }
-
- // Finally the samples, one line for each.
- for _, metric := range in.Metric {
- switch metricType {
- case dto.MetricType_COUNTER:
- if metric.Counter == nil {
- return written, fmt.Errorf(
- "expected counter in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Counter.GetValue(),
- )
- case dto.MetricType_GAUGE:
- if metric.Gauge == nil {
- return written, fmt.Errorf(
- "expected gauge in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Gauge.GetValue(),
- )
- case dto.MetricType_UNTYPED:
- if metric.Untyped == nil {
- return written, fmt.Errorf(
- "expected untyped in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- w, name, "", metric, "", 0,
- metric.Untyped.GetValue(),
- )
- case dto.MetricType_SUMMARY:
- if metric.Summary == nil {
- return written, fmt.Errorf(
- "expected summary in metric %s %s", name, metric,
- )
- }
- for _, q := range metric.Summary.Quantile {
- n, err = writeSample(
- w, name, "", metric,
- model.QuantileLabel, q.GetQuantile(),
- q.GetValue(),
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeSample(
- w, name, "_sum", metric, "", 0,
- metric.Summary.GetSampleSum(),
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeSample(
- w, name, "_count", metric, "", 0,
- float64(metric.Summary.GetSampleCount()),
- )
- case dto.MetricType_HISTOGRAM:
- if metric.Histogram == nil {
- return written, fmt.Errorf(
- "expected histogram in metric %s %s", name, metric,
- )
- }
- infSeen := false
- for _, b := range metric.Histogram.Bucket {
- n, err = writeSample(
- w, name, "_bucket", metric,
- model.BucketLabel, b.GetUpperBound(),
- float64(b.GetCumulativeCount()),
- )
- written += n
- if err != nil {
- return
- }
- if math.IsInf(b.GetUpperBound(), +1) {
- infSeen = true
- }
- }
- if !infSeen {
- n, err = writeSample(
- w, name, "_bucket", metric,
- model.BucketLabel, math.Inf(+1),
- float64(metric.Histogram.GetSampleCount()),
- )
- written += n
- if err != nil {
- return
- }
- }
- n, err = writeSample(
- w, name, "_sum", metric, "", 0,
- metric.Histogram.GetSampleSum(),
- )
- written += n
- if err != nil {
- return
- }
- n, err = writeSample(
- w, name, "_count", metric, "", 0,
- float64(metric.Histogram.GetSampleCount()),
- )
- default:
- return written, fmt.Errorf(
- "unexpected type in metric %s %s", name, metric,
- )
- }
- written += n
- if err != nil {
- return
- }
- }
- return
-}
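
A hypothetical caller-side sketch of MetricFamilyToText; the expected output follows from the HELP/TYPE/sample sequence implemented above:

    package main

    import (
        "os"

        dto "github.com/prometheus/client_model/go"
        "github.com/prometheus/common/expfmt"
        "google.golang.org/protobuf/proto"
    )

    func main() {
        mf := &dto.MetricFamily{
            Name: proto.String("queue_depth"),
            Help: proto.String("Current depth of the work queue."),
            Type: dto.MetricType_GAUGE.Enum(),
            Metric: []*dto.Metric{{
                Label: []*dto.LabelPair{{
                    Name:  proto.String("shard"),
                    Value: proto.String("a"),
                }},
                Gauge: &dto.Gauge{Value: proto.Float64(7)},
            }},
        }
        if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
            panic(err)
        }
        // Prints:
        // # HELP queue_depth Current depth of the work queue.
        // # TYPE queue_depth gauge
        // queue_depth{shard="a"} 7
    }
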
-
-// writeSample writes a single sample in text format to w, given the metric
-// name, the metric proto message itself, optionally an additional label name
-// with a float64 value (use empty string as label name if not required), and
-// the value. The function returns the number of bytes written and any error
-// encountered.
-func writeSample(
- w enhancedWriter,
- name, suffix string,
- metric *dto.Metric,
- additionalLabelName string, additionalLabelValue float64,
- value float64,
-) (int, error) {
- written := 0
- n, err := writeNameAndLabelPairs(
- w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
- )
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeFloat(w, value)
- written += n
- if err != nil {
- return written, err
- }
- if metric.TimestampMs != nil {
- err = w.WriteByte(' ')
- written++
- if err != nil {
- return written, err
- }
- n, err = writeInt(w, *metric.TimestampMs)
- written += n
- if err != nil {
- return written, err
- }
- }
- err = w.WriteByte('\n')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
-// explicitly given metric name and additional label pair into text formatted as
-// required by the text format and writes it to 'w'. An empty slice in
-// combination with an empty string 'additionalLabelName' results in nothing
-// being written. Otherwise, the label pairs are written, escaped as required by
-// the text format, and enclosed in '{...}'. The function returns the number of
-// bytes written and any error encountered. If the metric name is not
-// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
-// label names will also be quoted.
-func writeNameAndLabelPairs(
- w enhancedWriter,
- name string,
- in []*dto.LabelPair,
- additionalLabelName string, additionalLabelValue float64,
-) (int, error) {
- var (
- written int
- separator byte = '{'
- metricInsideBraces = false
- )
-
- if name != "" {
- // If the name does not pass the legacy validity check, we must put the
- // metric name inside the braces.
- if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
- metricInsideBraces = true
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
- n, err := writeName(w, name)
- written += n
- if err != nil {
- return written, err
- }
- }
-
- if len(in) == 0 && additionalLabelName == "" {
- if metricInsideBraces {
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- }
- return written, nil
- }
-
- for _, lp := range in {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := writeName(w, lp.GetName())
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeEscapedString(w, lp.GetValue(), true)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- separator = ','
- }
- if additionalLabelName != "" {
- err := w.WriteByte(separator)
- written++
- if err != nil {
- return written, err
- }
- n, err := w.WriteString(additionalLabelName)
- written += n
- if err != nil {
- return written, err
- }
- n, err = w.WriteString(`="`)
- written += n
- if err != nil {
- return written, err
- }
- n, err = writeFloat(w, additionalLabelValue)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- }
- err := w.WriteByte('}')
- written++
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// writeEscapedString replaces '\' by '\\', the newline character by '\n', and,
-// if includeDoubleQuote is true, '"' by '\"'.
-var (
- escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
- quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
-)
-
-func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
- if includeDoubleQuote {
- return quotedEscaper.WriteString(w, v)
- }
- return escaper.WriteString(w, v)
-}
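
The two replacers differ only in whether '"' is escaped: HELP docstrings are emitted unquoted, while label values are emitted inside "...". A small standalone illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        escaper := strings.NewReplacer("\\", `\\`, "\n", `\n`)
        quotedEscaper := strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)

        // HELP text: backslash and newline escaped, quotes left alone.
        fmt.Println(escaper.Replace("a \"b\"\nc")) // a "b"\nc
        // Label values: quotes escaped too, as they are emitted inside "...".
        fmt.Println(quotedEscaper.Replace("a \"b\"\nc")) // a \"b\"\nc
    }
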
-
-// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
-// a few common cases for increased efficiency. For non-hardcoded cases, it uses
-// strconv.AppendFloat to avoid allocations, similar to writeInt.
-func writeFloat(w enhancedWriter, f float64) (int, error) {
- switch {
- case f == 1:
- return 1, w.WriteByte('1')
- case f == 0:
- return 1, w.WriteByte('0')
- case f == -1:
- return w.WriteString("-1")
- case math.IsNaN(f):
- return w.WriteString("NaN")
- case math.IsInf(f, +1):
- return w.WriteString("+Inf")
- case math.IsInf(f, -1):
- return w.WriteString("-Inf")
- default:
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
- }
-}
-
-// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
-// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
-// allocations.
-func writeInt(w enhancedWriter, i int64) (int, error) {
- bp := numBufPool.Get().(*[]byte)
- *bp = strconv.AppendInt((*bp)[:0], i, 10)
- written, err := w.Write(*bp)
- numBufPool.Put(bp)
- return written, err
-}
-
-// writeName writes a string as-is if it complies with the legacy naming
-// scheme, or escapes it in double quotes if not.
-func writeName(w enhancedWriter, name string) (int, error) {
- if model.IsValidLegacyMetricName(model.LabelValue(name)) {
- return w.WriteString(name)
- }
- var written int
- var err error
- err = w.WriteByte('"')
- written++
- if err != nil {
- return written, err
- }
- var n int
- n, err = writeEscapedString(w, name, true)
- written += n
- if err != nil {
- return written, err
- }
- err = w.WriteByte('"')
- written++
- return written, err
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
deleted file mode 100644
index 26490211af..0000000000
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ /dev/null
@@ -1,781 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
-
- dto "github.com/prometheus/client_model/go"
-
- "google.golang.org/protobuf/proto"
-
- "github.com/prometheus/common/model"
-)
-
-// A stateFn is a function that represents a state in a state machine. By
-// executing it, the state is progressed to the next state. The stateFn returns
-// another stateFn, which represents the new state. The end state is represented
-// by nil.
-type stateFn func() stateFn
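
The driving loop for this state machine appears in TextToMetricFamilies below: `for nextState := p.startOfLine; nextState != nil; nextState = nextState()`. A toy sketch of the same pattern, detached from parsing:

    package main

    import "fmt"

    type stateFn func() stateFn

    func main() {
        i := 0
        var step stateFn
        step = func() stateFn {
            i++
            fmt.Println("state", i)
            if i == 3 {
                return nil // nil marks the end state
            }
            return step
        }
        // Each iteration runs one state and receives the next one.
        for state := step; state != nil; state = state() {
        }
    }
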
-
-// ParseError signals errors while parsing the simple and flat text-based
-// exchange format.
-type ParseError struct {
- Line int
- Msg string
-}
-
-// Error implements the error interface.
-func (e ParseError) Error() string {
- return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
-}
-
-// TextParser is used to parse the simple and flat text-based exchange format. Its
-// zero value is ready to use.
-type TextParser struct {
- metricFamiliesByName map[string]*dto.MetricFamily
- buf *bufio.Reader // Where the parsed input is read through.
- err error // Most recent error.
- lineCount int // Tracks the line count for error messages.
- currentByte byte // The most recent byte read.
- currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
- currentMF *dto.MetricFamily
- currentMetric *dto.Metric
- currentLabelPair *dto.LabelPair
-
- // The remaining member variables are only used for summaries/histograms.
- currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
- // Summary specific.
- summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
- currentQuantile float64
- // Histogram specific.
- histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
- currentBucket float64
- // These tell us if the currently processed line ends on '_count' or
- // '_sum' respectively and belong to a summary/histogram, representing the sample
- // count and sum of that summary/histogram.
- currentIsSummaryCount, currentIsSummarySum bool
- currentIsHistogramCount, currentIsHistogramSum bool
-}
-
-// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
-// format and creates MetricFamily proto messages. It returns the MetricFamily
-// proto messages in a map where the metric names are the keys, along with any
-// error encountered.
-//
-// If the input contains duplicate metrics (i.e. lines with the same metric name
-// and exactly the same label set), the resulting MetricFamily will contain
-// duplicate Metric proto messages. The same is true for duplicate label
-// names. Checks for duplicates have to be performed separately, if required.
-// Also note that neither the metrics within each MetricFamily are sorted nor
-// the label pairs within each Metric. Sorting is not required for the most
-// frequent use of this method, which is sample ingestion in the Prometheus
-// server. However, for presentation purposes, you might want to sort the
-// metrics, and in some cases, you must sort the labels, e.g. for consumption by
-// the metric family injection hook of the Prometheus registry.
-//
-// Summaries and histograms are rather special beasts. You would probably not
-// use them in the simple text format anyway. This method can deal with
-// summaries and histograms if they are presented in exactly the way the
-// text.Create function creates them.
-//
-// This method must not be called concurrently. If you want to parse different
-// input concurrently, instantiate a separate Parser for each goroutine.
-func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
- p.reset(in)
- for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
- // Magic happens here...
- }
- // Get rid of empty metric families.
- for k, mf := range p.metricFamiliesByName {
- if len(mf.GetMetric()) == 0 {
- delete(p.metricFamiliesByName, k)
- }
- }
- // If p.err is io.EOF now, we have run into a premature end of the input
- // stream. Turn this error into something nicer and more
- // meaningful. (io.EOF is often used as a signal for the legitimate end
- // of an input stream.)
- if p.err != nil && errors.Is(p.err, io.EOF) {
- p.parseError("unexpected end of input stream")
- }
- return p.metricFamiliesByName, p.err
-}
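
A hypothetical round-trip sketch: parsing a small exposition and reading back the resulting MetricFamily (zero-value TextParser, as documented above):

    package main

    import (
        "fmt"
        "strings"

        "github.com/prometheus/common/expfmt"
    )

    func main() {
        in := "# TYPE http_requests_total counter\n" +
            "http_requests_total{code=\"200\"} 1027 1395066363000\n"
        var parser expfmt.TextParser
        families, err := parser.TextToMetricFamilies(strings.NewReader(in))
        if err != nil {
            panic(err)
        }
        mf := families["http_requests_total"]
        fmt.Println(mf.GetType())                              // COUNTER
        fmt.Println(mf.GetMetric()[0].GetCounter().GetValue()) // 1027
    }
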
-
-func (p *TextParser) reset(in io.Reader) {
- p.metricFamiliesByName = map[string]*dto.MetricFamily{}
- if p.buf == nil {
- p.buf = bufio.NewReader(in)
- } else {
- p.buf.Reset(in)
- }
- p.err = nil
- p.lineCount = 0
- if p.summaries == nil || len(p.summaries) > 0 {
- p.summaries = map[uint64]*dto.Metric{}
- }
- if p.histograms == nil || len(p.histograms) > 0 {
- p.histograms = map[uint64]*dto.Metric{}
- }
- p.currentQuantile = math.NaN()
- p.currentBucket = math.NaN()
-}
-
-// startOfLine represents the state where the next byte read from p.buf is the
-// start of a line (or whitespace leading up to it).
-func (p *TextParser) startOfLine() stateFn {
- p.lineCount++
- if p.skipBlankTab(); p.err != nil {
- // This is the only place that we expect to see io.EOF,
- // which is not an error but the signal that we are done.
- // Any other error that happens to align with the start of
- // a line is still an error.
- if errors.Is(p.err, io.EOF) {
- p.err = nil
- }
- return nil
- }
- switch p.currentByte {
- case '#':
- return p.startComment
- case '\n':
- return p.startOfLine // Empty line, start the next one.
- }
- return p.readingMetricName
-}
-
-// startComment represents the state where the next byte read from p.buf is the
-// start of a comment (or whitespace leading up to it).
-func (p *TextParser) startComment() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- // If we have hit the end of line already, there is nothing left
- // to do. This is not considered a syntax error.
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- keyword := p.currentToken.String()
- if keyword != "HELP" && keyword != "TYPE" {
- // Generic comment, ignore by fast forwarding to end of line.
- for p.currentByte != '\n' {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
- return nil // Unexpected end of input.
- }
- }
- return p.startOfLine
- }
- // There is something. Next has to be a metric name.
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenAsMetricName(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- // At the end of the line already.
- // Again, this is not considered a syntax error.
- return p.startOfLine
- }
- if !isBlankOrTab(p.currentByte) {
- p.parseError("invalid metric name in comment")
- return nil
- }
- p.setOrCreateCurrentMF()
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- // At the end of the line already.
- // Again, this is not considered a syntax error.
- return p.startOfLine
- }
- switch keyword {
- case "HELP":
- return p.readingHelp
- case "TYPE":
- return p.readingType
- }
- panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
-}
-
-// readingMetricName represents the state where the last byte read (now in
-// p.currentByte) is the first byte of a metric name.
-func (p *TextParser) readingMetricName() stateFn {
- if p.readTokenAsMetricName(); p.err != nil {
- return nil
- }
- if p.currentToken.Len() == 0 {
- p.parseError("invalid metric name")
- return nil
- }
- p.setOrCreateCurrentMF()
- // Now is the time to fix the type if it hasn't happened yet.
- if p.currentMF.Type == nil {
- p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
- }
- p.currentMetric = &dto.Metric{}
- // Do not append the newly created currentMetric to
- // currentMF.Metric right now. First check whether this is a
- // summary or histogram whose metric already exists, which we can
- // only know after having read all the labels.
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingLabels
-}
-
-// readingLabels represents the state where the last byte read (now in
-// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
-// first byte of the value (otherwise).
-func (p *TextParser) readingLabels() stateFn {
- // Summaries/histograms are special. We have to reset the
- // currentLabels map, currentQuantile and currentBucket before starting to
- // read labels.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- p.currentLabels = map[string]string{}
- p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
- p.currentQuantile = math.NaN()
- p.currentBucket = math.NaN()
- }
- if p.currentByte != '{' {
- return p.readingValue
- }
- return p.startLabelName
-}
-
-// startLabelName represents the state where the next byte read from p.buf is
-// the start of a label name (or whitespace leading up to it).
-func (p *TextParser) startLabelName() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '}' {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- }
- if p.readTokenAsLabelName(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() == 0 {
- p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
- return nil
- }
- p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
- if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
- p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
- return nil
- }
- // Special summary/histogram treatment. Don't add 'quantile' and 'le'
- // labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
- }
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '=' {
- p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
- return nil
- }
- // Check for duplicate label names.
- labels := make(map[string]struct{})
- for _, l := range p.currentMetric.Label {
- lName := l.GetName()
- if _, exists := labels[lName]; !exists {
- labels[lName] = struct{}{}
- } else {
- p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
- return nil
- }
- }
- return p.startLabelValue
-}
-
-// startLabelValue represents the state where the next byte read from p.buf is
-// the start of a (quoted) label value (or whitespace leading up to it).
-func (p *TextParser) startLabelValue() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '"' {
- p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
- return nil
- }
- if p.readTokenAsLabelValue(); p.err != nil {
- return nil
- }
- if !model.LabelValue(p.currentToken.String()).IsValid() {
- p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
- return nil
- }
- p.currentLabelPair.Value = proto.String(p.currentToken.String())
- // Special treatment of summaries:
- // - Quantile labels are special, will result in dto.Quantile later.
- // - Other labels have to be added to currentLabels for signature calculation.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- if p.currentLabelPair.GetName() == model.QuantileLabel {
- if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
- return nil
- }
- } else {
- p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
- }
- }
- // Similar special treatment of histograms.
- if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- if p.currentLabelPair.GetName() == model.BucketLabel {
- if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
- return nil
- }
- } else {
- p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
- }
- }
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- switch p.currentByte {
- case ',':
- return p.startLabelName
-
- case '}':
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- default:
- p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
- return nil
- }
-}
-
-// readingValue represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the sample value (i.e. a float).
-func (p *TextParser) readingValue() stateFn {
- // When we are here, we have read all the labels, so for the
- // special case of a summary/histogram, we can finally find out
- // if the metric already exists.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- signature := model.LabelsToSignature(p.currentLabels)
- if summary := p.summaries[signature]; summary != nil {
- p.currentMetric = summary
- } else {
- p.summaries[signature] = p.currentMetric
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- signature := model.LabelsToSignature(p.currentLabels)
- if histogram := p.histograms[signature]; histogram != nil {
- p.currentMetric = histogram
- } else {
- p.histograms[signature] = p.currentMetric
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- } else {
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- value, err := parseFloat(p.currentToken.String())
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
- return nil
- }
- switch p.currentMF.GetType() {
- case dto.MetricType_COUNTER:
- p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
- case dto.MetricType_GAUGE:
- p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
- case dto.MetricType_UNTYPED:
- p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
- case dto.MetricType_SUMMARY:
- // *sigh*
- if p.currentMetric.Summary == nil {
- p.currentMetric.Summary = &dto.Summary{}
- }
- switch {
- case p.currentIsSummaryCount:
- p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsSummarySum:
- p.currentMetric.Summary.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentQuantile):
- p.currentMetric.Summary.Quantile = append(
- p.currentMetric.Summary.Quantile,
- &dto.Quantile{
- Quantile: proto.Float64(p.currentQuantile),
- Value: proto.Float64(value),
- },
- )
- }
- case dto.MetricType_HISTOGRAM:
- // *sigh*
- if p.currentMetric.Histogram == nil {
- p.currentMetric.Histogram = &dto.Histogram{}
- }
- switch {
- case p.currentIsHistogramCount:
- p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsHistogramSum:
- p.currentMetric.Histogram.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentBucket):
- p.currentMetric.Histogram.Bucket = append(
- p.currentMetric.Histogram.Bucket,
- &dto.Bucket{
- UpperBound: proto.Float64(p.currentBucket),
- CumulativeCount: proto.Uint64(uint64(value)),
- },
- )
- }
- default:
- p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- return p.startTimestamp
-}
-
-// startTimestamp represents the state where the next byte read from p.buf is
-// the start of the timestamp (or whitespace leading up to it).
-func (p *TextParser) startTimestamp() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
- return nil
- }
- p.currentMetric.TimestampMs = proto.Int64(timestamp)
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() > 0 {
- p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
- return nil
- }
- return p.startOfLine
-}
-
-// readingHelp represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the docstring after 'HELP'.
-func (p *TextParser) readingHelp() stateFn {
- if p.currentMF.Help != nil {
- p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the docstring.
- if p.readTokenUntilNewline(true); p.err != nil {
- return nil // Unexpected end of input.
- }
- p.currentMF.Help = proto.String(p.currentToken.String())
- return p.startOfLine
-}
-
-// readingType represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the type hint after 'TYPE'.
-func (p *TextParser) readingType() stateFn {
- if p.currentMF.Type != nil {
- p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the type.
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
- if !ok {
- p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
- return nil
- }
- p.currentMF.Type = dto.MetricType(metricType).Enum()
- return p.startOfLine
-}
-
-// parseError sets p.err to a ParseError at the current line with the given
-// message.
-func (p *TextParser) parseError(msg string) {
- p.err = ParseError{
- Line: p.lineCount,
- Msg: msg,
- }
-}
-
-// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
-// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
-func (p *TextParser) skipBlankTab() {
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
- return
- }
- }
-}
-
-// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
-// anything if p.currentByte is neither ' ' nor '\t'.
-func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
- if isBlankOrTab(p.currentByte) {
- p.skipBlankTab()
- }
-}
-
-// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
-// first byte considered is the byte already read (now in p.currentByte). The
-// first whitespace byte encountered is still copied into p.currentByte, but not
-// into p.currentToken.
-func (p *TextParser) readTokenUntilWhitespace() {
- p.currentToken.Reset()
- for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
-// byte considered is the byte already read (now in p.currentByte). The first
-// newline byte encountered is still copied into p.currentByte, but not into
-// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' translates into '\', and '\n' into a line-feed character.
-// All other escape sequences are invalid and cause an error.
-func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
- p.currentToken.Reset()
- escaped := false
- for p.err == nil {
- if recognizeEscapeSequence && escaped {
- switch p.currentByte {
- case '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- } else {
- switch p.currentByte {
- case '\n':
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a metric name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsMetricName() {
- p.currentToken.Reset()
- if !isValidMetricNameStart(p.currentByte) {
- return
- }
- for {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
- return
- }
- }
-}
-
-// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a label name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsLabelName() {
- p.currentToken.Reset()
- if !isValidLabelNameStart(p.currentByte) {
- return
- }
- for {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
- return
- }
- }
-}
-
-// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
-// In contrast to the other 'readTokenAs...' functions, which start with the
-// last read byte in p.currentByte, this method ignores p.currentByte and starts
-// with reading a new byte from p.buf. The first byte not part of a label value
-// is still copied into p.currentByte, but not into p.currentToken.
-func (p *TextParser) readTokenAsLabelValue() {
- p.currentToken.Reset()
- escaped := false
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
- return
- }
- if escaped {
- switch p.currentByte {
- case '"', '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- continue
- }
- switch p.currentByte {
- case '"':
- return
- case '\n':
- p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
-}
-
-func (p *TextParser) setOrCreateCurrentMF() {
- p.currentIsSummaryCount = false
- p.currentIsSummarySum = false
- p.currentIsHistogramCount = false
- p.currentIsHistogramSum = false
- name := p.currentToken.String()
- if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
- return
- }
-	// Check whether this is a _sum or _count for a summary/histogram.
- summaryName := summaryMetricName(name)
- if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- if isCount(name) {
- p.currentIsSummaryCount = true
- }
- if isSum(name) {
- p.currentIsSummarySum = true
- }
- return
- }
- }
- histogramName := histogramMetricName(name)
- if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
- if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- if isCount(name) {
- p.currentIsHistogramCount = true
- }
- if isSum(name) {
- p.currentIsHistogramSum = true
- }
- return
- }
- }
- p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
- p.metricFamiliesByName[name] = p.currentMF
-}
-
-func isValidLabelNameStart(b byte) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
-}
-
-func isValidLabelNameContinuation(b byte) bool {
- return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
-}
-
-func isValidMetricNameStart(b byte) bool {
- return isValidLabelNameStart(b) || b == ':'
-}
-
-func isValidMetricNameContinuation(b byte) bool {
- return isValidLabelNameContinuation(b) || b == ':'
-}
-
-func isBlankOrTab(b byte) bool {
- return b == ' ' || b == '\t'
-}
-
-func isCount(name string) bool {
- return len(name) > 6 && name[len(name)-6:] == "_count"
-}
-
-func isSum(name string) bool {
- return len(name) > 4 && name[len(name)-4:] == "_sum"
-}
-
-func isBucket(name string) bool {
- return len(name) > 7 && name[len(name)-7:] == "_bucket"
-}
-
-func summaryMetricName(name string) string {
- switch {
- case isCount(name):
- return name[:len(name)-6]
- case isSum(name):
- return name[:len(name)-4]
- default:
- return name
- }
-}
-
-func histogramMetricName(name string) string {
- switch {
- case isCount(name):
- return name[:len(name)-6]
- case isSum(name):
- return name[:len(name)-4]
- case isBucket(name):
- return name[:len(name)-7]
- default:
- return name
- }
-}
-
-func parseFloat(s string) (float64, error) {
- if strings.ContainsAny(s, "pP_") {
- return 0, fmt.Errorf("unsupported character in float")
- }
- return strconv.ParseFloat(s, 64)
-}
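
The token readers above feed expfmt's TextParser, whose public entry point is TextToMetricFamilies. As a minimal sketch (assuming the vendored github.com/prometheus/common/expfmt package is importable from the caller's module), this shows setOrCreateCurrentMF folding `_sum` and `_count` samples into the one summary family declared by the `# TYPE` line:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := `# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.5"} 4
rpc_duration_seconds_sum 17
rpc_duration_seconds_count 5
`
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// All three sample lines land in a single MetricFamily: the _sum and
	// _count suffixes are recognized and merged into the declared summary.
	for name, mf := range families {
		fmt.Println(name, mf.GetType())
	}
}
```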
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
deleted file mode 100644
index 7723656d58..0000000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-PACKAGE
-
-package goautoneg
-import "bitbucket.org/ww/goautoneg"
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-FUNCTIONS
-
-func Negotiate(header string, alternatives []string) (content_type string)
-Negotiate the most appropriate content_type given the accept header
-and a list of alternatives.
-
-func ParseAccept(header string) (accept []Accept)
-Parse an Accept Header string returning a sorted list
-of clauses
-
-
-TYPES
-
-type Accept struct {
- Type, SubType string
- Q float32
- Params map[string]string
-}
-Structure to represent a clause in an HTTP Accept Header
-
-
-SUBDIRECTORIES
-
- .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
deleted file mode 100644
index a21b9d15dd..0000000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-package goautoneg
-
-import (
- "sort"
- "strconv"
- "strings"
-)
-
-// Structure to represent a clause in an HTTP Accept Header
-type Accept struct {
- Type, SubType string
- Q float64
- Params map[string]string
-}
-
-// For internal use, so that we can use the sort interface
-type accept_slice []Accept
-
-func (accept accept_slice) Len() int {
- slice := []Accept(accept)
- return len(slice)
-}
-
-func (accept accept_slice) Less(i, j int) bool {
- slice := []Accept(accept)
- ai, aj := slice[i], slice[j]
- if ai.Q > aj.Q {
- return true
- }
- if ai.Type != "*" && aj.Type == "*" {
- return true
- }
- if ai.SubType != "*" && aj.SubType == "*" {
- return true
- }
- return false
-}
-
-func (accept accept_slice) Swap(i, j int) {
- slice := []Accept(accept)
- slice[i], slice[j] = slice[j], slice[i]
-}
-
-// Parse an Accept Header string returning a sorted list
-// of clauses
-func ParseAccept(header string) (accept []Accept) {
- parts := strings.Split(header, ",")
- accept = make([]Accept, 0, len(parts))
- for _, part := range parts {
- part := strings.Trim(part, " ")
-
- a := Accept{}
- a.Params = make(map[string]string)
- a.Q = 1.0
-
- mrp := strings.Split(part, ";")
-
- media_range := mrp[0]
- sp := strings.Split(media_range, "/")
- a.Type = strings.Trim(sp[0], " ")
-
- switch {
- case len(sp) == 1 && a.Type == "*":
- a.SubType = "*"
- case len(sp) == 2:
- a.SubType = strings.Trim(sp[1], " ")
- default:
- continue
- }
-
- if len(mrp) == 1 {
- accept = append(accept, a)
- continue
- }
-
- for _, param := range mrp[1:] {
- sp := strings.SplitN(param, "=", 2)
- if len(sp) != 2 {
- continue
- }
- token := strings.Trim(sp[0], " ")
- if token == "q" {
- a.Q, _ = strconv.ParseFloat(sp[1], 32)
- } else {
- a.Params[token] = strings.Trim(sp[1], " ")
- }
- }
-
- accept = append(accept, a)
- }
-
- slice := accept_slice(accept)
- sort.Sort(slice)
-
- return
-}
-
-// Negotiate the most appropriate content_type given the accept header
-// and a list of alternatives.
-func Negotiate(header string, alternatives []string) (content_type string) {
- asp := make([][]string, 0, len(alternatives))
- for _, ctype := range alternatives {
- asp = append(asp, strings.SplitN(ctype, "/", 2))
- }
- for _, clause := range ParseAccept(header) {
- for i, ctsp := range asp {
- if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
- content_type = alternatives[i]
- return
- }
- if clause.Type == ctsp[0] && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- if clause.Type == "*" && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- }
- }
- return
-}
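
As a usage sketch of the API above: Negotiate returns the alternative whose parsed Accept clause sorts first. The import path is illustrative only; this repo vendors the package under an internal directory, so it is not importable from outside.

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg" // assumed importable; vendored as internal here
)

func main() {
	header := "text/html;q=0.9, application/json, */*;q=0.1"
	alternatives := []string{"text/plain", "application/json"}
	// ParseAccept sorts clauses by q-value and wildcard specificity, so the
	// application/json clause (implicit q=1.0) outranks text/html (q=0.9).
	fmt.Println(goautoneg.Negotiate(header, alternatives)) // application/json
}
```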
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
deleted file mode 100644
index 80d1fe944e..0000000000
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "time"
-)
-
-type AlertStatus string
-
-const (
- AlertFiring AlertStatus = "firing"
- AlertResolved AlertStatus = "resolved"
-)
-
-// Alert is a generic representation of an alert in the Prometheus eco-system.
-type Alert struct {
- // Label value pairs for purpose of aggregation, matching, and disposition
- // dispatching. This must minimally include an "alertname" label.
- Labels LabelSet `json:"labels"`
-
- // Extra key/value information which does not define alert identity.
- Annotations LabelSet `json:"annotations"`
-
- // The known time range for this alert. Both ends are optional.
- StartsAt time.Time `json:"startsAt,omitempty"`
- EndsAt time.Time `json:"endsAt,omitempty"`
- GeneratorURL string `json:"generatorURL"`
-}
-
-// Name returns the name of the alert. It is equivalent to the "alertname" label.
-func (a *Alert) Name() string {
- return string(a.Labels[AlertNameLabel])
-}
-
-// Fingerprint returns a unique hash for the alert. It is equivalent to
-// the fingerprint of the alert's label set.
-func (a *Alert) Fingerprint() Fingerprint {
- return a.Labels.Fingerprint()
-}
-
-func (a *Alert) String() string {
- s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
- if a.Resolved() {
- return s + "[resolved]"
- }
- return s + "[active]"
-}
-
-// Resolved returns true iff the activity interval ended in the past.
-func (a *Alert) Resolved() bool {
- return a.ResolvedAt(time.Now())
-}
-
-// ResolvedAt returns true iff the activity interval ended before
-// the given timestamp.
-func (a *Alert) ResolvedAt(ts time.Time) bool {
- if a.EndsAt.IsZero() {
- return false
- }
- return !a.EndsAt.After(ts)
-}
-
-// Status returns the status of the alert.
-func (a *Alert) Status() AlertStatus {
- return a.StatusAt(time.Now())
-}
-
-// StatusAt returns the status of the alert at the given timestamp.
-func (a *Alert) StatusAt(ts time.Time) AlertStatus {
- if a.ResolvedAt(ts) {
- return AlertResolved
- }
- return AlertFiring
-}
-
-// Validate checks whether the alert data is consistent and returns an error if it is not.
-func (a *Alert) Validate() error {
- if a.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
- }
- if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
- return fmt.Errorf("start time must be before end time")
- }
- if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %w", err)
- }
- if len(a.Labels) == 0 {
- return fmt.Errorf("at least one label pair required")
- }
- if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %w", err)
- }
- return nil
-}
-
-// Alerts is a list of alerts that can be sorted in chronological order.
-type Alerts []*Alert
-
-func (as Alerts) Len() int { return len(as) }
-func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
-
-func (as Alerts) Less(i, j int) bool {
- if as[i].StartsAt.Before(as[j].StartsAt) {
- return true
- }
- if as[i].EndsAt.Before(as[j].EndsAt) {
- return true
- }
- return as[i].Fingerprint() < as[j].Fingerprint()
-}
-
-// HasFiring returns true iff one of the alerts is not resolved.
-func (as Alerts) HasFiring() bool {
- for _, a := range as {
- if !a.Resolved() {
- return true
- }
- }
- return false
-}
-
-// HasFiringAt returns true iff one of the alerts is not resolved
-// at the time ts.
-func (as Alerts) HasFiringAt(ts time.Time) bool {
- for _, a := range as {
- if !a.ResolvedAt(ts) {
- return true
- }
- }
- return false
-}
-
-// Status returns StatusFiring iff at least one of the alerts is firing.
-func (as Alerts) Status() AlertStatus {
- if as.HasFiring() {
- return AlertFiring
- }
- return AlertResolved
-}
-
-// StatusAt returns StatusFiring iff at least one of the alerts is firing
-// at the time ts.
-func (as Alerts) StatusAt(ts time.Time) AlertStatus {
- if as.HasFiringAt(ts) {
- return AlertFiring
- }
- return AlertResolved
-}
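
A short sketch of the Alert API above (via github.com/prometheus/common/model): an alert whose EndsAt lies in the past reports itself as resolved.

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighLatency", "job": "api"},
		StartsAt: time.Now().Add(-10 * time.Minute),
		EndsAt:   time.Now().Add(-1 * time.Minute), // interval already ended
	}
	fmt.Println(a.Name())     // HighLatency
	fmt.Println(a.Status())   // resolved
	fmt.Println(a.Validate()) // <nil>: labels and time range are consistent
}
```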
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
deleted file mode 100644
index fc4de4106e..0000000000
--- a/vendor/github.com/prometheus/common/model/fingerprinting.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "strconv"
-)
-
-// Fingerprint provides a hash-capable representation of a Metric.
-// For our purposes, FNV-1A 64-bit is used.
-type Fingerprint uint64
-
-// FingerprintFromString transforms a string representation into a Fingerprint.
-func FingerprintFromString(s string) (Fingerprint, error) {
- num, err := strconv.ParseUint(s, 16, 64)
- return Fingerprint(num), err
-}
-
-// ParseFingerprint parses the input string into a fingerprint.
-func ParseFingerprint(s string) (Fingerprint, error) {
- num, err := strconv.ParseUint(s, 16, 64)
- if err != nil {
- return 0, err
- }
- return Fingerprint(num), nil
-}
-
-func (f Fingerprint) String() string {
- return fmt.Sprintf("%016x", uint64(f))
-}
-
-// Fingerprints represents a collection of Fingerprint subject to a given
-// natural sorting scheme. It implements sort.Interface.
-type Fingerprints []Fingerprint
-
-// Len implements sort.Interface.
-func (f Fingerprints) Len() int {
- return len(f)
-}
-
-// Less implements sort.Interface.
-func (f Fingerprints) Less(i, j int) bool {
- return f[i] < f[j]
-}
-
-// Swap implements sort.Interface.
-func (f Fingerprints) Swap(i, j int) {
- f[i], f[j] = f[j], f[i]
-}
-
-// FingerprintSet is a set of Fingerprints.
-type FingerprintSet map[Fingerprint]struct{}
-
-// Equal returns true if both sets contain the same elements (and not more).
-func (s FingerprintSet) Equal(o FingerprintSet) bool {
- if len(s) != len(o) {
- return false
- }
-
- for k := range s {
- if _, ok := o[k]; !ok {
- return false
- }
- }
-
- return true
-}
-
-// Intersection returns the elements contained in both sets.
-func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
- myLength, otherLength := len(s), len(o)
- if myLength == 0 || otherLength == 0 {
- return FingerprintSet{}
- }
-
- subSet := s
- superSet := o
-
- if otherLength < myLength {
- subSet = o
- superSet = s
- }
-
- out := FingerprintSet{}
-
- for k := range subSet {
- if _, ok := superSet[k]; ok {
- out[k] = struct{}{}
- }
- }
-
- return out
-}
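
Fingerprints round-trip through their 16-digit hexadecimal form; a quick sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fp, err := model.ParseFingerprint("000000000000dead")
	if err != nil {
		panic(err)
	}
	fmt.Println(fp.String())                     // 000000000000dead
	fmt.Println(fp == model.Fingerprint(0xdead)) // true
}
```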
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
deleted file mode 100644
index 367afecd30..0000000000
--- a/vendor/github.com/prometheus/common/model/fnv.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
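
These unexported helpers use the standard FNV-1a offset basis and prime, so they produce the same value as the standard library, just without going through the hash.Hash64 interface. A sketch of the equivalence using hash/fnv:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	// hashNew()/hashAdd(h, s) above compute exactly this: FNV-1a over the
	// bytes of the string, starting from offset64 and multiplying by prime64.
	h := fnv.New64a()
	h.Write([]byte("http_requests_total"))
	fmt.Printf("%016x\n", h.Sum64())
}
```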
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
deleted file mode 100644
index 3317ce22ff..0000000000
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strings"
- "unicode/utf8"
-)
-
-const (
-	// AlertNameLabel is the name of the label containing an alert's name.
- AlertNameLabel = "alertname"
-
- // ExportedLabelPrefix is the prefix to prepend to the label names present in
- // exported metrics if a label of the same name is added by the server.
- ExportedLabelPrefix = "exported_"
-
- // MetricNameLabel is the label name indicating the metric name of a
- // timeseries.
- MetricNameLabel = "__name__"
-
- // SchemeLabel is the name of the label that holds the scheme on which to
- // scrape a target.
- SchemeLabel = "__scheme__"
-
- // AddressLabel is the name of the label that holds the address of
- // a scrape target.
- AddressLabel = "__address__"
-
- // MetricsPathLabel is the name of the label that holds the path on which to
- // scrape a target.
- MetricsPathLabel = "__metrics_path__"
-
- // ScrapeIntervalLabel is the name of the label that holds the scrape interval
- // used to scrape a target.
- ScrapeIntervalLabel = "__scrape_interval__"
-
- // ScrapeTimeoutLabel is the name of the label that holds the scrape
- // timeout used to scrape a target.
- ScrapeTimeoutLabel = "__scrape_timeout__"
-
- // ReservedLabelPrefix is a prefix which is not legal in user-supplied
- // label names.
- ReservedLabelPrefix = "__"
-
- // MetaLabelPrefix is a prefix for labels that provide meta information.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series.
- MetaLabelPrefix = "__meta_"
-
- // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series. This is reserved for use in
- // Prometheus configuration files by users.
- TmpLabelPrefix = "__tmp_"
-
- // ParamLabelPrefix is a prefix for labels that provide URL parameters
- // used to scrape a target.
- ParamLabelPrefix = "__param_"
-
- // JobLabel is the label name indicating the job from which a timeseries
- // was scraped.
- JobLabel = "job"
-
- // InstanceLabel is the label name used for the instance label.
- InstanceLabel = "instance"
-
- // BucketLabel is used for the label that defines the upper bound of a
- // bucket of a histogram ("le" -> "less or equal").
- BucketLabel = "le"
-
- // QuantileLabel is used for the label that defines the quantile in a
- // summary.
- QuantileLabel = "quantile"
-)
-
-// LabelNameRE is a regular expression matching valid label names. Note that the
-// IsValid method of LabelName performs the same check but faster than a match
-// with this regular expression.
-var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-
-// A LabelName is a key for a LabelSet or Metric. It has a value associated
-// therewith.
-type LabelName string
-
-// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
-// names, and iff it's valid UTF-8 if NameValidationScheme is set to
-// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
-// check but a much faster hardcoded implementation.
-func (ln LabelName) IsValid() bool {
- if len(ln) == 0 {
- return false
- }
- switch NameValidationScheme {
- case LegacyValidation:
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- case UTF8Validation:
- return utf8.ValidString(string(ln))
- default:
- panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
- }
- return true
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (ln *LabelName) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// LabelNames is a sortable LabelName slice. It implements sort.Interface.
-type LabelNames []LabelName
-
-func (l LabelNames) Len() int {
- return len(l)
-}
-
-func (l LabelNames) Less(i, j int) bool {
- return l[i] < l[j]
-}
-
-func (l LabelNames) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-func (l LabelNames) String() string {
- labelStrings := make([]string, 0, len(l))
- for _, label := range l {
- labelStrings = append(labelStrings, string(label))
- }
- return strings.Join(labelStrings, ", ")
-}
-
-// A LabelValue is an associated value for a LabelName.
-type LabelValue string
-
-// IsValid returns true iff the string is valid UTF-8.
-func (lv LabelValue) IsValid() bool {
- return utf8.ValidString(string(lv))
-}
-
-// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
-type LabelValues []LabelValue
-
-func (l LabelValues) Len() int {
- return len(l)
-}
-
-func (l LabelValues) Less(i, j int) bool {
- return string(l[i]) < string(l[j])
-}
-
-func (l LabelValues) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-// LabelPair pairs a name with a value.
-type LabelPair struct {
- Name LabelName
- Value LabelValue
-}
-
-// LabelPairs is a sortable slice of LabelPair pointers. It implements
-// sort.Interface.
-type LabelPairs []*LabelPair
-
-func (l LabelPairs) Len() int {
- return len(l)
-}
-
-func (l LabelPairs) Less(i, j int) bool {
- switch {
- case l[i].Name > l[j].Name:
- return false
- case l[i].Name < l[j].Name:
- return true
- case l[i].Value > l[j].Value:
- return false
- case l[i].Value < l[j].Value:
- return true
- default:
- return false
- }
-}
-
-func (l LabelPairs) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
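
Under the default LegacyValidation scheme, IsValid accepts exactly the pattern `[a-zA-Z_][a-zA-Z0-9_]*`; a small sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelName("job").IsValid())      // true
	fmt.Println(model.LabelName("_hidden").IsValid())  // true
	fmt.Println(model.LabelName("0job").IsValid())     // false: leading digit
	fmt.Println(model.LabelName("job-name").IsValid()) // false: '-' not allowed
}
```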
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
deleted file mode 100644
index d0ad88da33..0000000000
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "sort"
-)
-
-// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
-// may be fully-qualified down to the point where it may resolve to a single
-// Metric in the data store or not. All operations that occur within the realm
-// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
-// match.
-type LabelSet map[LabelName]LabelValue
-
-// Validate checks whether all names and values in the label set
-// are valid.
-func (ls LabelSet) Validate() error {
- for ln, lv := range ls {
- if !ln.IsValid() {
- return fmt.Errorf("invalid name %q", ln)
- }
- if !lv.IsValid() {
- return fmt.Errorf("invalid value %q", lv)
- }
- }
- return nil
-}
-
-// Equal returns true iff both label sets have exactly the same key/value pairs.
-func (ls LabelSet) Equal(o LabelSet) bool {
- if len(ls) != len(o) {
- return false
- }
- for ln, lv := range ls {
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if olv != lv {
- return false
- }
- }
- return true
-}
-
-// Before compares the metrics, using the following criteria:
-//
-// If ls has fewer labels than o, it is before o. If it has more, it is not.
-//
-// If the number of labels is the same, the superset of all label names is
-// sorted alphanumerically. The first differing label pair found in that order
-// determines the outcome: If the label does not exist at all in ls, then ls
-// is before o, and vice versa. Otherwise the label value is compared
-// alphanumerically.
-//
-// If ls and o are equal, the method returns false.
-func (ls LabelSet) Before(o LabelSet) bool {
- if len(ls) < len(o) {
- return true
- }
- if len(ls) > len(o) {
- return false
- }
-
- lns := make(LabelNames, 0, len(ls)+len(o))
- for ln := range ls {
- lns = append(lns, ln)
- }
- for ln := range o {
- lns = append(lns, ln)
- }
- // It's probably not worth it to de-dup lns.
- sort.Sort(lns)
- for _, ln := range lns {
- mlv, ok := ls[ln]
- if !ok {
- return true
- }
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if mlv < olv {
- return true
- }
- if mlv > olv {
- return false
- }
- }
- return false
-}
-
-// Clone returns a copy of the label set.
-func (ls LabelSet) Clone() LabelSet {
- lsn := make(LabelSet, len(ls))
- for ln, lv := range ls {
- lsn[ln] = lv
- }
- return lsn
-}
-
-// Merge is a helper function to non-destructively merge two label sets.
-func (l LabelSet) Merge(other LabelSet) LabelSet {
- result := make(LabelSet, len(l))
-
- for k, v := range l {
- result[k] = v
- }
-
- for k, v := range other {
- result[k] = v
- }
-
- return result
-}
-
-// Fingerprint returns the LabelSet's fingerprint.
-func (ls LabelSet) Fingerprint() Fingerprint {
- return labelSetToFingerprint(ls)
-}
-
-// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
-// algorithm, which is, however, more susceptible to hash collisions.
-func (ls LabelSet) FastFingerprint() Fingerprint {
- return labelSetToFastFingerprint(ls)
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
- var m map[LabelName]LabelValue
- if err := json.Unmarshal(b, &m); err != nil {
- return err
- }
- // encoding/json only unmarshals maps of the form map[string]T. It treats
- // LabelName as a string and does not call its UnmarshalJSON method.
- // Thus, we have to replicate the behavior here.
- for ln := range m {
- if !ln.IsValid() {
- return fmt.Errorf("%q is not a valid label name", ln)
- }
- }
- *l = LabelSet(m)
- return nil
-}
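
A brief sketch of LabelSet in use: Merge is non-destructive, and Before orders first by label count.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api"}
	extra := model.LabelSet{"instance": "db-1:9090"}
	merged := base.Merge(extra) // base itself is left unchanged
	fmt.Println(merged)         // {instance="db-1:9090", job="api"}
	fmt.Println(base.Before(merged)) // true: fewer labels sorts first
}
```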
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
deleted file mode 100644
index 481c47b46e..0000000000
--- a/vendor/github.com/prometheus/common/model/labelset_string.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.21
-
-package model
-
-import (
- "bytes"
- "slices"
- "strconv"
-)
-
-// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
-func (l LabelSet) String() string {
- var lna [32]string // On stack to avoid memory allocation for sorting names.
- labelNames := lna[:0]
- for name := range l {
- labelNames = append(labelNames, string(name))
- }
- slices.Sort(labelNames)
- var bytea [1024]byte // On stack to avoid memory allocation while building the output.
- b := bytes.NewBuffer(bytea[:0])
- b.WriteByte('{')
- for i, name := range labelNames {
- if i > 0 {
- b.WriteString(", ")
- }
- b.WriteString(name)
- b.WriteByte('=')
- b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
- }
- b.WriteByte('}')
- return b.String()
-}
diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
deleted file mode 100644
index c4212685e7..0000000000
--- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.21
-
-package model
-
-import (
- "fmt"
- "sort"
- "strings"
-)
-
-// String was optimized using functions not available for go 1.20
-// or lower. We keep the old implementation for compatibility with client_golang.
-// Once client_golang drops support for go 1.20 (scheduled for August 2024), this
-// file can be removed.
-func (l LabelSet) String() string {
- labelNames := make([]string, 0, len(l))
- for name := range l {
- labelNames = append(labelNames, string(name))
- }
- sort.Strings(labelNames)
- lstrs := make([]string, 0, len(l))
- for _, name := range labelNames {
- lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
- }
- return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
deleted file mode 100644
index 447ab8ad63..0000000000
--- a/vendor/github.com/prometheus/common/model/metadata.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-// MetricType represents metric type values.
-type MetricType string
-
-const (
- MetricTypeCounter = MetricType("counter")
- MetricTypeGauge = MetricType("gauge")
- MetricTypeHistogram = MetricType("histogram")
- MetricTypeGaugeHistogram = MetricType("gaugehistogram")
- MetricTypeSummary = MetricType("summary")
- MetricTypeInfo = MetricType("info")
- MetricTypeStateset = MetricType("stateset")
- MetricTypeUnknown = MetricType("unknown")
-)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
deleted file mode 100644
index eb865e5a59..0000000000
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "regexp"
- "sort"
- "strings"
- "unicode/utf8"
-
- dto "github.com/prometheus/client_model/go"
- "google.golang.org/protobuf/proto"
-)
-
-var (
- // NameValidationScheme determines the method of name validation to be used by
- // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
- // in isolation from other components that don't support UTF-8 may result in
- // bugs or other undefined behavior. This value is intended to be set by
- // UTF-8-aware binaries as part of their startup. To avoid need for locking,
- // this value should be set once, ideally in an init(), before multiple
- // goroutines are started.
- NameValidationScheme = LegacyValidation
-
- // NameEscapingScheme defines the default way that names will be
- // escaped when presented to systems that do not support UTF-8 names. If the
- // Content-Type "escaping" term is specified, that will override this value.
- NameEscapingScheme = ValueEncodingEscaping
-)
-
-// ValidationScheme is a Go enum for determining how metric and label names will
-// be validated by this library.
-type ValidationScheme int
-
-const (
-	// LegacyValidation is a setting that requires that metric and label names
- // conform to the original Prometheus character requirements described by
- // MetricNameRE and LabelNameRE.
- LegacyValidation ValidationScheme = iota
-
- // UTF8Validation only requires that metric and label names be valid UTF-8
- // strings.
- UTF8Validation
-)
-
-type EscapingScheme int
-
-const (
- // NoEscaping indicates that a name will not be escaped. Unescaped names that
- // do not conform to the legacy validity check will use a new exposition
- // format syntax that will be officially standardized in future versions.
- NoEscaping EscapingScheme = iota
-
- // UnderscoreEscaping replaces all legacy-invalid characters with underscores.
- UnderscoreEscaping
-
- // DotsEscaping is similar to UnderscoreEscaping, except that dots are
- // converted to `_dot_` and pre-existing underscores are converted to `__`.
- DotsEscaping
-
- // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
- // characters with the unicode value, surrounded by underscores. Single
- // underscores are replaced with double underscores.
- ValueEncodingEscaping
-)
-
-const (
- // EscapingKey is the key in an Accept or Content-Type header that defines how
- // metric and label names that do not conform to the legacy character
- // requirements should be escaped when being scraped by a legacy prometheus
- // system. If a system does not explicitly pass an escaping parameter in the
- // Accept header, the default NameEscapingScheme will be used.
- EscapingKey = "escaping"
-
- // Possible values for Escaping Key:
- AllowUTF8 = "allow-utf-8" // No escaping required.
- EscapeUnderscores = "underscores"
- EscapeDots = "dots"
- EscapeValues = "values"
-)
-
-// MetricNameRE is a regular expression matching valid metric
-// names. Note that the IsValidMetricName function performs the same
-// check but faster than a match with this regular expression.
-var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
-
-// A Metric is similar to a LabelSet, but the key difference is that a Metric is
-// a singleton and refers to one and only one stream of samples.
-type Metric LabelSet
-
-// Equal compares the metrics.
-func (m Metric) Equal(o Metric) bool {
- return LabelSet(m).Equal(LabelSet(o))
-}
-
-// Before compares the metrics' underlying label sets.
-func (m Metric) Before(o Metric) bool {
- return LabelSet(m).Before(LabelSet(o))
-}
-
-// Clone returns a copy of the Metric.
-func (m Metric) Clone() Metric {
- clone := make(Metric, len(m))
- for k, v := range m {
- clone[k] = v
- }
- return clone
-}
-
-func (m Metric) String() string {
- metricName, hasName := m[MetricNameLabel]
- numLabels := len(m) - 1
- if !hasName {
- numLabels = len(m)
- }
- labelStrings := make([]string, 0, numLabels)
- for label, value := range m {
- if label != MetricNameLabel {
- labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
- }
- }
-
- switch numLabels {
- case 0:
- if hasName {
- return string(metricName)
- }
- return "{}"
- default:
- sort.Strings(labelStrings)
- return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
- }
-}
-
-// Fingerprint returns a Metric's Fingerprint.
-func (m Metric) Fingerprint() Fingerprint {
- return LabelSet(m).Fingerprint()
-}
-
-// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
-// algorithm, which is, however, more susceptible to hash collisions.
-func (m Metric) FastFingerprint() Fingerprint {
- return LabelSet(m).FastFingerprint()
-}
-
-// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
-// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
-// selected.
-func IsValidMetricName(n LabelValue) bool {
- switch NameValidationScheme {
- case LegacyValidation:
- return IsValidLegacyMetricName(n)
- case UTF8Validation:
- if len(n) == 0 {
- return false
- }
- return utf8.ValidString(string(n))
- default:
- panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
- }
-}
-
-// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
-// legacy validation scheme regardless of the value of NameValidationScheme.
-// This function, however, does not use MetricNameRE for the check but a much
-// faster hardcoded implementation.
-func IsValidLegacyMetricName(n LabelValue) bool {
- if len(n) == 0 {
- return false
- }
- for i, b := range n {
- if !isValidLegacyRune(b, i) {
- return false
- }
- }
- return true
-}
-
-// EscapeMetricFamily escapes the given metric names and labels with the given
-// escaping scheme. Returns a new object that uses the same pointers to fields
-// when possible and creates new escaped versions so as not to mutate the
-// input.
-func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
- if v == nil {
- return nil
- }
-
- if scheme == NoEscaping {
- return v
- }
-
- out := &dto.MetricFamily{
- Help: v.Help,
- Type: v.Type,
- Unit: v.Unit,
- }
-
- // If the name is nil, copy as-is, don't try to escape.
- if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
- out.Name = v.Name
- } else {
- out.Name = proto.String(EscapeName(v.GetName(), scheme))
- }
- for _, m := range v.Metric {
- if !metricNeedsEscaping(m) {
- out.Metric = append(out.Metric, m)
- continue
- }
-
- escaped := &dto.Metric{
- Gauge: m.Gauge,
- Counter: m.Counter,
- Summary: m.Summary,
- Untyped: m.Untyped,
- Histogram: m.Histogram,
- TimestampMs: m.TimestampMs,
- }
-
- for _, l := range m.Label {
- if l.GetName() == MetricNameLabel {
- if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
- escaped.Label = append(escaped.Label, l)
- continue
- }
- escaped.Label = append(escaped.Label, &dto.LabelPair{
- Name: proto.String(MetricNameLabel),
- Value: proto.String(EscapeName(l.GetValue(), scheme)),
- })
- continue
- }
- if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
- escaped.Label = append(escaped.Label, l)
- continue
- }
- escaped.Label = append(escaped.Label, &dto.LabelPair{
- Name: proto.String(EscapeName(l.GetName(), scheme)),
- Value: l.Value,
- })
- }
- out.Metric = append(out.Metric, escaped)
- }
- return out
-}
-
-func metricNeedsEscaping(m *dto.Metric) bool {
- for _, l := range m.Label {
- if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
- return true
- }
- if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
- return true
- }
- }
- return false
-}
-
-const (
- lowerhex = "0123456789abcdef"
-)
-
-// EscapeName escapes the incoming name according to the provided escaping
-// scheme. Depending on the rules of escaping, this may cause no change in the
-// string that is returned. (Especially NoEscaping, which by definition is a
-// noop). This function does not do any validation of the name.
-func EscapeName(name string, scheme EscapingScheme) string {
- if len(name) == 0 {
- return name
- }
- var escaped strings.Builder
- switch scheme {
- case NoEscaping:
- return name
- case UnderscoreEscaping:
- if IsValidLegacyMetricName(LabelValue(name)) {
- return name
- }
- for i, b := range name {
- if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else {
- escaped.WriteRune('_')
- }
- }
- return escaped.String()
- case DotsEscaping:
- // Do not early return for legacy valid names, we still escape underscores.
- for i, b := range name {
- if b == '_' {
- escaped.WriteString("__")
- } else if b == '.' {
- escaped.WriteString("_dot_")
- } else if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else {
- escaped.WriteRune('_')
- }
- }
- return escaped.String()
- case ValueEncodingEscaping:
- if IsValidLegacyMetricName(LabelValue(name)) {
- return name
- }
- escaped.WriteString("U__")
- for i, b := range name {
- if isValidLegacyRune(b, i) {
- escaped.WriteRune(b)
- } else if !utf8.ValidRune(b) {
- escaped.WriteString("_FFFD_")
- } else if b < 0x100 {
- escaped.WriteRune('_')
- for s := 4; s >= 0; s -= 4 {
- escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
- }
- escaped.WriteRune('_')
- } else if b < 0x10000 {
- escaped.WriteRune('_')
- for s := 12; s >= 0; s -= 4 {
- escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
- }
- escaped.WriteRune('_')
- }
- }
- return escaped.String()
- default:
- panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
- }
-}
-
-// lower function taken from strconv.atoi
-func lower(c byte) byte {
- return c | ('x' - 'X')
-}
-
-// UnescapeName unescapes the incoming name according to the provided escaping
-// scheme if possible. Some schemes are partially or totally non-roundtrippable.
-// If any error is encountered, returns the original input.
-func UnescapeName(name string, scheme EscapingScheme) string {
- if len(name) == 0 {
- return name
- }
- switch scheme {
- case NoEscaping:
- return name
- case UnderscoreEscaping:
- // It is not possible to unescape from underscore replacement.
- return name
- case DotsEscaping:
- name = strings.ReplaceAll(name, "_dot_", ".")
- name = strings.ReplaceAll(name, "__", "_")
- return name
- case ValueEncodingEscaping:
- escapedName, found := strings.CutPrefix(name, "U__")
- if !found {
- return name
- }
-
- var unescaped strings.Builder
- TOP:
- for i := 0; i < len(escapedName); i++ {
- // All non-underscores are treated normally.
- if escapedName[i] != '_' {
- unescaped.WriteByte(escapedName[i])
- continue
- }
- i++
- if i >= len(escapedName) {
- return name
- }
- // A double underscore is a single underscore.
- if escapedName[i] == '_' {
- unescaped.WriteByte('_')
- continue
- }
-		// We appear to be in an escaped code point; process it.
- var utf8Val uint
- for j := 0; i < len(escapedName); j++ {
- // This is too many characters for a utf8 value.
- if j > 4 {
- return name
- }
- // Found a closing underscore, convert to a rune, check validity, and append.
- if escapedName[i] == '_' {
- utf8Rune := rune(utf8Val)
- if !utf8.ValidRune(utf8Rune) {
- return name
- }
- unescaped.WriteRune(utf8Rune)
- continue TOP
- }
- r := lower(escapedName[i])
- utf8Val *= 16
- if r >= '0' && r <= '9' {
- utf8Val += uint(r) - '0'
- } else if r >= 'a' && r <= 'f' {
- utf8Val += uint(r) - 'a' + 10
- } else {
- return name
- }
- i++
- }
- // Didn't find closing underscore, invalid.
- return name
- }
- return unescaped.String()
- default:
- panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
- }
-}
-
-func isValidLegacyRune(b rune, i int) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
-}
-
-func (e EscapingScheme) String() string {
- switch e {
- case NoEscaping:
- return AllowUTF8
- case UnderscoreEscaping:
- return EscapeUnderscores
- case DotsEscaping:
- return EscapeDots
- case ValueEncodingEscaping:
- return EscapeValues
- default:
- panic(fmt.Sprintf("unknown format scheme %d", e))
- }
-}
-
-func ToEscapingScheme(s string) (EscapingScheme, error) {
- if s == "" {
- return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
- }
- switch s {
- case AllowUTF8:
- return NoEscaping, nil
- case EscapeUnderscores:
- return UnderscoreEscaping, nil
- case EscapeDots:
- return DotsEscaping, nil
- case EscapeValues:
- return ValueEncodingEscaping, nil
- default:
- return NoEscaping, fmt.Errorf("unknown format scheme " + s)
- }
-}
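
A round-trip sketch of the escaping API above: under ValueEncodingEscaping, each legacy-invalid character becomes its hex code point wrapped in underscores, and UnescapeName reverses it.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "my.metric/total"
	esc := model.EscapeName(name, model.ValueEncodingEscaping)
	fmt.Println(esc) // U__my_2e_metric_2f_total ('.' is 0x2e, '/' is 0x2f)
	fmt.Println(model.UnescapeName(esc, model.ValueEncodingEscaping)) // my.metric/total
}
```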
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
deleted file mode 100644
index a7b9691707..0000000000
--- a/vendor/github.com/prometheus/common/model/model.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package model contains common data structures that are shared across
-// Prometheus components and libraries.
-package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
deleted file mode 100644
index dc8a0026c4..0000000000
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "sort"
-)
-
-// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
-// used to separate label names, label values, and other strings from each other
-// when calculating their combined hash value (aka signature aka fingerprint).
-const SeparatorByte byte = 255
-
-// cache the signature of an empty label set.
-var emptyLabelSignature = hashNew()
-
-// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
-// given label set. (Collisions are possible but unlikely if the number of label
-// sets the function is applied to is small.)
-func LabelsToSignature(labels map[string]string) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make([]string, 0, len(labels))
- for labelName := range labels {
- labelNames = append(labelNames, labelName)
- }
- sort.Strings(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, labelName)
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, labels[labelName])
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
-// parameter (rather than a label map) and returns a Fingerprint.
-func labelSetToFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- labelNames := make(LabelNames, 0, len(ls))
- for labelName := range ls {
- labelNames = append(labelNames, labelName)
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(ls[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return Fingerprint(sum)
-}
-
-// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
-// faster and less allocation-heavy hash function, which is more susceptible to
-// create hash collisions. Therefore, collision detection should be applied.
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- var result uint64
- for labelName, labelValue := range ls {
- sum := hashNew()
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(labelValue))
- result ^= sum
- }
- return Fingerprint(result)
-}
-
-// SignatureForLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation. The labels passed in
-// will be sorted by this function.
-func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- sort.Sort(LabelNames(labels))
-
- sum := hashNew()
- for _, label := range labels {
- sum = hashAdd(sum, string(label))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[label]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and excludes the labels with any of the
-// specified LabelNames from the signature calculation.
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
- if len(m) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make(LabelNames, 0, len(m))
- for labelName := range m {
- if _, exclude := labels[labelName]; !exclude {
- labelNames = append(labelNames, labelName)
- }
- }
- if len(labelNames) == 0 {
- return emptyLabelSignature
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
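
LabelsToSignature and LabelSet.Fingerprint run the identical sorted-FNV-1a computation, so equivalent inputs hash identically; a sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := map[string]string{"job": "api", "instance": "db-1"}
	ls := model.LabelSet{"job": "api", "instance": "db-1"}
	// Both lines print the same 16 hex digits: the map- and LabelSet-based
	// paths sort the names and feed the same FNV-1a stream.
	fmt.Printf("%016x\n", model.LabelsToSignature(m))
	fmt.Println(ls.Fingerprint())
}
```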
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
deleted file mode 100644
index 910b0b71fc..0000000000
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "time"
-)
-
-// Matcher describes a match against the value of a given label.
-type Matcher struct {
- Name LabelName `json:"name"`
- Value string `json:"value"`
- IsRegex bool `json:"isRegex"`
-}
-
-func (m *Matcher) UnmarshalJSON(b []byte) error {
- type plain Matcher
- if err := json.Unmarshal(b, (*plain)(m)); err != nil {
- return err
- }
-
- if len(m.Name) == 0 {
- return fmt.Errorf("label name in matcher must not be empty")
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Validate returns nil iff all fields of the matcher have valid values.
-func (m *Matcher) Validate() error {
- if !m.Name.IsValid() {
- return fmt.Errorf("invalid name %q", m.Name)
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return fmt.Errorf("invalid regular expression %q", m.Value)
- }
- } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
- return fmt.Errorf("invalid value %q", m.Value)
- }
- return nil
-}
-
-// Silence defines the representation of a silence definition in the Prometheus
-// eco-system.
-type Silence struct {
- ID uint64 `json:"id,omitempty"`
-
- Matchers []*Matcher `json:"matchers"`
-
- StartsAt time.Time `json:"startsAt"`
- EndsAt time.Time `json:"endsAt"`
-
- CreatedAt time.Time `json:"createdAt,omitempty"`
- CreatedBy string `json:"createdBy"`
- Comment string `json:"comment,omitempty"`
-}
-
-// Validate returns nil iff all fields of the silence have valid values.
-func (s *Silence) Validate() error {
- if len(s.Matchers) == 0 {
- return fmt.Errorf("at least one matcher required")
- }
- for _, m := range s.Matchers {
- if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %w", err)
- }
- }
- if s.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
- }
- if s.EndsAt.IsZero() {
- return fmt.Errorf("end time missing")
- }
- if s.EndsAt.Before(s.StartsAt) {
-		return fmt.Errorf("end time must not be before start time")
- }
- if s.CreatedBy == "" {
- return fmt.Errorf("creator information missing")
- }
- if s.Comment == "" {
- return fmt.Errorf("comment missing")
- }
- if s.CreatedAt.IsZero() {
- return fmt.Errorf("creation timestamp missing")
- }
- return nil
-}
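-
-// A minimal usage sketch (illustrative only; all field values below are
-// hypothetical):
-//
-//	s := Silence{
-//		Matchers:  []*Matcher{{Name: "job", Value: "node", IsRegex: false}},
-//		StartsAt:  time.Now(),
-//		EndsAt:    time.Now().Add(2 * time.Hour),
-//		CreatedAt: time.Now(),
-//		CreatedBy: "ops@example.com",
-//		Comment:   "maintenance window",
-//	}
-//	if err := s.Validate(); err != nil {
-//		// reject the silence
-//	}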
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
deleted file mode 100644
index 5727452c1e..0000000000
--- a/vendor/github.com/prometheus/common/model/time.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-const (
-	// MinimumTick is the minimum supported time resolution. This has to be
-	// at most time.Second in order for the code below to work.
- minimumTick = time.Millisecond
- // second is the Time duration equivalent to one second.
- second = int64(time.Second / minimumTick)
- // The number of nanoseconds per minimum tick.
- nanosPerTick = int64(minimumTick / time.Nanosecond)
-
- // Earliest is the earliest Time representable. Handy for
- // initializing a high watermark.
- Earliest = Time(math.MinInt64)
- // Latest is the latest Time representable. Handy for initializing
- // a low watermark.
- Latest = Time(math.MaxInt64)
-)
-
-// Time is the number of milliseconds since the epoch
-// (1970-01-01 00:00 UTC) excluding leap seconds.
-type Time int64
-
-// Interval describes an interval between two timestamps.
-type Interval struct {
- Start, End Time
-}
-
-// Now returns the current time as a Time.
-func Now() Time {
- return TimeFromUnixNano(time.Now().UnixNano())
-}
-
-// TimeFromUnix returns the Time equivalent to the Unix Time t
-// provided in seconds.
-func TimeFromUnix(t int64) Time {
- return Time(t * second)
-}
-
-// TimeFromUnixNano returns the Time equivalent to the Unix Time
-// t provided in nanoseconds.
-func TimeFromUnixNano(t int64) Time {
- return Time(t / nanosPerTick)
-}
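-
-// A minimal usage sketch of the conversions above (the values are
-// hypothetical). Precision finer than minimumTick is truncated:
-//
-//	ts := TimeFromUnix(1700000000)       // seconds since the epoch
-//	ts = ts.Add(1500 * time.Millisecond) // advance by 1.5s
-//	_ = ts.Time()                        // back to time.Time, ms precision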
-
-// Equal reports whether two Times represent the same instant.
-func (t Time) Equal(o Time) bool {
- return t == o
-}
-
-// Before reports whether the Time t is before o.
-func (t Time) Before(o Time) bool {
- return t < o
-}
-
-// After reports whether the Time t is after o.
-func (t Time) After(o Time) bool {
- return t > o
-}
-
-// Add returns the Time t + d.
-func (t Time) Add(d time.Duration) Time {
- return t + Time(d/minimumTick)
-}
-
-// Sub returns the Duration t - o.
-func (t Time) Sub(o Time) time.Duration {
- return time.Duration(t-o) * minimumTick
-}
-
-// Time returns the time.Time representation of t.
-func (t Time) Time() time.Time {
- return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
-}
-
-// Unix returns t as a Unix time, the number of seconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) Unix() int64 {
- return int64(t) / second
-}
-
-// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) UnixNano() int64 {
- return int64(t) * nanosPerTick
-}
-
-// The number of digits after the dot.
-var dotPrecision = int(math.Log10(float64(second)))
-
-// String returns a string representation of the Time.
-func (t Time) String() string {
- return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (t Time) MarshalJSON() ([]byte, error) {
- return []byte(t.String()), nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (t *Time) UnmarshalJSON(b []byte) error {
- p := strings.Split(string(b), ".")
- switch len(p) {
- case 1:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- *t = Time(v * second)
-
- case 2:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- v *= second
-
- prec := dotPrecision - len(p[1])
- if prec < 0 {
- p[1] = p[1][:dotPrecision]
- } else if prec > 0 {
- p[1] = p[1] + strings.Repeat("0", prec)
- }
-
- va, err := strconv.ParseInt(p[1], 10, 32)
- if err != nil {
- return err
- }
-
-		// If the value was something like -0.1, the negative sign is lost in
-		// the parsing because of the leading zero; this ensures we capture it.
- if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
- *t = Time(v+va) * -1
- } else {
- *t = Time(v + va)
- }
-
- default:
- return fmt.Errorf("invalid time %q", string(b))
- }
- return nil
-}
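-
-// A minimal sketch of the wire format handled above (the inputs are
-// hypothetical). Time marshals as seconds with a fractional part, and the
-// special case above preserves the sign of inputs such as "-0.1":
-//
-//	var ts Time
-//	_ = ts.UnmarshalJSON([]byte("1435781451.781")) // 1435781451781 ms
-//	_ = ts.UnmarshalJSON([]byte("-0.1"))           // -100 ms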
-
-// Duration wraps time.Duration. It is used to parse the custom duration format
-// from YAML.
-// This type should not propagate beyond the scope of input/output processing.
-type Duration time.Duration
-
-// Set implements pflag/flag.Value
-func (d *Duration) Set(s string) error {
- var err error
- *d, err = ParseDuration(s)
- return err
-}
-
-// Type implements pflag.Value
-func (d *Duration) Type() string {
- return "duration"
-}
-
-func isdigit(c byte) bool { return c >= '0' && c <= '9' }
-
-// Units are required to go in order from biggest to smallest.
-// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
-var unitMap = map[string]struct {
- pos int
- mult uint64
-}{
- "ms": {7, uint64(time.Millisecond)},
- "s": {6, uint64(time.Second)},
- "m": {5, uint64(time.Minute)},
- "h": {4, uint64(time.Hour)},
- "d": {3, uint64(24 * time.Hour)},
- "w": {2, uint64(7 * 24 * time.Hour)},
- "y": {1, uint64(365 * 24 * time.Hour)},
-}
-
-// ParseDuration parses a string into a time.Duration, assuming that a year
-// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(s string) (Duration, error) {
- switch s {
- case "0":
- // Allow 0 without a unit.
- return 0, nil
- case "":
- return 0, errors.New("empty duration string")
- }
-
- orig := s
- var dur uint64
- lastUnitPos := 0
-
- for s != "" {
- if !isdigit(s[0]) {
- return 0, fmt.Errorf("not a valid duration string: %q", orig)
- }
- // Consume [0-9]*
- i := 0
- for ; i < len(s) && isdigit(s[i]); i++ {
- }
- v, err := strconv.ParseUint(s[:i], 10, 0)
- if err != nil {
- return 0, fmt.Errorf("not a valid duration string: %q", orig)
- }
- s = s[i:]
-
- // Consume unit.
- for i = 0; i < len(s) && !isdigit(s[i]); i++ {
- }
- if i == 0 {
- return 0, fmt.Errorf("not a valid duration string: %q", orig)
- }
- u := s[:i]
- s = s[i:]
- unit, ok := unitMap[u]
- if !ok {
- return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
- }
- if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
- return 0, fmt.Errorf("not a valid duration string: %q", orig)
- }
- lastUnitPos = unit.pos
- // Check if the provided duration overflows time.Duration (> ~ 290years).
- if v > 1<<63/unit.mult {
- return 0, errors.New("duration out of range")
- }
- dur += v * unit.mult
- if dur > 1<<63-1 {
- return 0, errors.New("duration out of range")
- }
- }
- return Duration(dur), nil
-}
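-
-// A minimal usage sketch (the inputs are hypothetical). Units must appear
-// in descending order, so "1h30m" parses while "30m1h" is rejected:
-//
-//	d, err := ParseDuration("1h30m") // 90 minutes, err == nil
-//	_, err = ParseDuration("30m1h")  // err != nil: units out of order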
-
-func (d Duration) String() string {
- var (
- ms = int64(time.Duration(d) / time.Millisecond)
- r = ""
- )
- if ms == 0 {
- return "0s"
- }
-
- f := func(unit string, mult int64, exact bool) {
- if exact && ms%mult != 0 {
- return
- }
- if v := ms / mult; v > 0 {
- r += fmt.Sprintf("%d%s", v, unit)
- ms -= v * mult
- }
- }
-
- // Only format years and weeks if the remainder is zero, as it is often
- // easier to read 90d than 12w6d.
- f("y", 1000*60*60*24*365, true)
- f("w", 1000*60*60*24*7, true)
-
- f("d", 1000*60*60*24, false)
- f("h", 1000*60*60, false)
- f("m", 1000*60, false)
- f("s", 1000, false)
- f("ms", 1, false)
-
- return r
-}
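-
-// A minimal sketch of the formatting above (the values are hypothetical).
-// Weeks and years appear only when the duration divides evenly:
-//
-//	_ = Duration(14 * 24 * time.Hour).String() // "2w"
-//	_ = Duration(90 * 24 * time.Hour).String() // "90d", not "12w6d"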
-
-// MarshalJSON implements the json.Marshaler interface.
-func (d Duration) MarshalJSON() ([]byte, error) {
- return json.Marshal(d.String())
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (d *Duration) UnmarshalJSON(bytes []byte) error {
- var s string
- if err := json.Unmarshal(bytes, &s); err != nil {
- return err
- }
- dur, err := ParseDuration(s)
- if err != nil {
- return err
- }
- *d = dur
- return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (d *Duration) MarshalText() ([]byte, error) {
- return []byte(d.String()), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (d *Duration) UnmarshalText(text []byte) error {
- var err error
- *d, err = ParseDuration(string(text))
- return err
-}
-
-// MarshalYAML implements the yaml.Marshaler interface.
-func (d Duration) MarshalYAML() (interface{}, error) {
- return d.String(), nil
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- dur, err := ParseDuration(s)
- if err != nil {
- return err
- }
- *d = dur
- return nil
-}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
deleted file mode 100644
index 8050637d82..0000000000
--- a/vendor/github.com/prometheus/common/model/value.go
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "sort"
- "strconv"
- "strings"
-)
-
-// ZeroSample is the pseudo zero-value of Sample used to signal a
-// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
-// and metric nil. Note that the natural zero value of Sample has a timestamp
-// of 0, which is possible to appear in a real Sample and thus not suitable
-// to signal a non-existing Sample.
-var ZeroSample = Sample{Timestamp: Earliest}
-
-// Sample is a sample pair associated with a metric. A single sample must
-// either define Value or Histogram but not both. If Histogram is nil, the
-// Value field is used; otherwise, Value should be ignored.
-type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
- Histogram *SampleHistogram `json:"histogram"`
-}
-
-// Equal compares first the metrics, then the timestamp, then the value. The
-// semantics of value equality are defined by SampleValue.Equal.
-func (s *Sample) Equal(o *Sample) bool {
- if s == o {
- return true
- }
-
- if !s.Metric.Equal(o.Metric) {
- return false
- }
- if !s.Timestamp.Equal(o.Timestamp) {
- return false
- }
- if s.Histogram != nil {
- return s.Histogram.Equal(o.Histogram)
- }
- return s.Value.Equal(o.Value)
-}
-
-func (s Sample) String() string {
- if s.Histogram != nil {
- return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
- Timestamp: s.Timestamp,
- Histogram: s.Histogram,
- })
- }
- return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- })
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s Sample) MarshalJSON() ([]byte, error) {
- if s.Histogram != nil {
- v := struct {
- Metric Metric `json:"metric"`
- Histogram SampleHistogramPair `json:"histogram"`
- }{
- Metric: s.Metric,
- Histogram: SampleHistogramPair{
- Timestamp: s.Timestamp,
- Histogram: s.Histogram,
- },
- }
- return json.Marshal(&v)
- }
- v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
- }{
- Metric: s.Metric,
- Value: SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- },
- }
- return json.Marshal(&v)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *Sample) UnmarshalJSON(b []byte) error {
- v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
- Histogram SampleHistogramPair `json:"histogram"`
- }{
- Metric: s.Metric,
- Value: SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- },
- Histogram: SampleHistogramPair{
- Timestamp: s.Timestamp,
- Histogram: s.Histogram,
- },
- }
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- s.Metric = v.Metric
- if v.Histogram.Histogram != nil {
- s.Timestamp = v.Histogram.Timestamp
- s.Histogram = v.Histogram.Histogram
- } else {
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
- }
-
- return nil
-}
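-
-// A minimal sketch of the two encodings handled above (the payloads are
-// hypothetical). A float sample carries "value", a native-histogram sample
-// carries "histogram"; exactly one of the two is present:
-//
-//	{"metric":{"__name__":"up"},"value":[1435781451.781,"1"]}
-//	{"metric":{"__name__":"rpc"},"histogram":[1435781451.781,{"count":"5","sum":"12.5","buckets":[[0,"0","1","5"]]}]}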
-
-// Samples is a sortable Sample slice. It implements sort.Interface.
-type Samples []*Sample
-
-func (s Samples) Len() int {
- return len(s)
-}
-
-// Less compares first the metrics, then the timestamp.
-func (s Samples) Less(i, j int) bool {
- switch {
- case s[i].Metric.Before(s[j].Metric):
- return true
- case s[j].Metric.Before(s[i].Metric):
- return false
- case s[i].Timestamp.Before(s[j].Timestamp):
- return true
- default:
- return false
- }
-}
-
-func (s Samples) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-// Equal compares two sets of samples and returns true if they are equal.
-func (s Samples) Equal(o Samples) bool {
- if len(s) != len(o) {
- return false
- }
-
- for i, sample := range s {
- if !sample.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
-// SampleStream is a stream of Values belonging to an attached Metric.
-type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
- Histograms []SampleHistogramPair `json:"histograms"`
-}
-
-func (ss SampleStream) String() string {
- valuesLength := len(ss.Values)
- vals := make([]string, valuesLength+len(ss.Histograms))
- for i, v := range ss.Values {
- vals[i] = v.String()
- }
- for i, v := range ss.Histograms {
- vals[i+valuesLength] = v.String()
- }
- return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
-}
-
-func (ss SampleStream) MarshalJSON() ([]byte, error) {
- if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
- v := struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
- Histograms []SampleHistogramPair `json:"histograms"`
- }{
- Metric: ss.Metric,
- Values: ss.Values,
- Histograms: ss.Histograms,
- }
- return json.Marshal(&v)
- } else if len(ss.Histograms) > 0 {
- v := struct {
- Metric Metric `json:"metric"`
- Histograms []SampleHistogramPair `json:"histograms"`
- }{
- Metric: ss.Metric,
- Histograms: ss.Histograms,
- }
- return json.Marshal(&v)
- } else {
- v := struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
- }{
- Metric: ss.Metric,
- Values: ss.Values,
- }
- return json.Marshal(&v)
- }
-}
-
-func (ss *SampleStream) UnmarshalJSON(b []byte) error {
- v := struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
- Histograms []SampleHistogramPair `json:"histograms"`
- }{
- Metric: ss.Metric,
- Values: ss.Values,
- Histograms: ss.Histograms,
- }
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- ss.Metric = v.Metric
- ss.Values = v.Values
- ss.Histograms = v.Histograms
-
- return nil
-}
-
-// Scalar is a scalar value evaluated at the set timestamp.
-type Scalar struct {
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-func (s Scalar) String() string {
- return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s Scalar) MarshalJSON() ([]byte, error) {
- v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
- return json.Marshal([...]interface{}{s.Timestamp, string(v)})
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *Scalar) UnmarshalJSON(b []byte) error {
- var f string
- v := [...]interface{}{&s.Timestamp, &f}
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- value, err := strconv.ParseFloat(f, 64)
- if err != nil {
- return fmt.Errorf("error parsing sample value: %w", err)
- }
- s.Value = SampleValue(value)
- return nil
-}
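-
-// A minimal sketch of the wire format above (the values are hypothetical):
-// the timestamp is a bare float, the value a quoted string.
-//
-//	[1435781451.781,"42"]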
-
-// String is a string value evaluated at the set timestamp.
-type String struct {
- Value string `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-func (s *String) String() string {
- return s.Value
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s String) MarshalJSON() ([]byte, error) {
- return json.Marshal([]interface{}{s.Timestamp, s.Value})
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *String) UnmarshalJSON(b []byte) error {
- v := [...]interface{}{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Vector is basically only an alias for Samples, but the
-// contract is that in a Vector, all Samples have the same timestamp.
-type Vector []*Sample
-
-func (vec Vector) String() string {
- entries := make([]string, len(vec))
- for i, s := range vec {
- entries[i] = s.String()
- }
- return strings.Join(entries, "\n")
-}
-
-func (vec Vector) Len() int { return len(vec) }
-func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
-
-// Less compares first the metrics, then the timestamp.
-func (vec Vector) Less(i, j int) bool {
- switch {
- case vec[i].Metric.Before(vec[j].Metric):
- return true
- case vec[j].Metric.Before(vec[i].Metric):
- return false
- case vec[i].Timestamp.Before(vec[j].Timestamp):
- return true
- default:
- return false
- }
-}
-
-// Equal compares two sets of samples and returns true if they are equal.
-func (vec Vector) Equal(o Vector) bool {
- if len(vec) != len(o) {
- return false
- }
-
- for i, sample := range vec {
- if !sample.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
-// Matrix is a list of time series.
-type Matrix []*SampleStream
-
-func (m Matrix) Len() int { return len(m) }
-func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
-func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-
-func (mat Matrix) String() string {
- matCp := make(Matrix, len(mat))
- copy(matCp, mat)
- sort.Sort(matCp)
-
- strs := make([]string, len(matCp))
-
- for i, ss := range matCp {
- strs[i] = ss.String()
- }
-
- return strings.Join(strs, "\n")
-}
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
deleted file mode 100644
index ae35cc2ab4..0000000000
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "math"
- "strconv"
-)
-
-// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
-// non-existing sample pair. It is a SamplePair with timestamp Earliest and
-// value 0.0. Note that the natural zero value of SamplePair has a timestamp
-// of 0, which is possible to appear in a real SamplePair and thus not
-// suitable to signal a non-existing SamplePair.
-var ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
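-
-// A minimal usage sketch (the values are hypothetical). Sample values travel
-// as quoted strings, and Equal treats two NaNs as equal:
-//
-//	var v SampleValue
-//	_ = v.UnmarshalJSON([]byte(`"NaN"`))
-//	_ = v.Equal(SampleValue(math.NaN())) // true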
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
deleted file mode 100644
index 54bb038cff..0000000000
--- a/vendor/github.com/prometheus/common/model/value_histogram.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
-)
-
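-// FloatString is a float64 that marshals to and from JSON as a quoted
-// string, matching the encoding used for sample values in this package.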
-type FloatString float64
-
-func (v FloatString) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-func (v FloatString) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-func (v *FloatString) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("float value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = FloatString(f)
- return nil
-}
-
-type HistogramBucket struct {
- Boundaries int32
- Lower FloatString
- Upper FloatString
- Count FloatString
-}
-
-func (s HistogramBucket) MarshalJSON() ([]byte, error) {
- b, err := json.Marshal(s.Boundaries)
- if err != nil {
- return nil, err
- }
- l, err := json.Marshal(s.Lower)
- if err != nil {
- return nil, err
- }
- u, err := json.Marshal(s.Upper)
- if err != nil {
- return nil, err
- }
- c, err := json.Marshal(s.Count)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
-}
-
-func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
- tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
- wantLen := len(tmp)
- if err := json.Unmarshal(buf, &tmp); err != nil {
- return err
- }
- if gotLen := len(tmp); gotLen != wantLen {
- return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
- }
- return nil
-}
-
-func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
- return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
-}
-
-func (b HistogramBucket) String() string {
- var sb strings.Builder
- lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
- upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
- if lowerInclusive {
- sb.WriteRune('[')
- } else {
- sb.WriteRune('(')
- }
- fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
- if upperInclusive {
- sb.WriteRune(']')
- } else {
- sb.WriteRune(')')
- }
- fmt.Fprintf(&sb, ":%v", b.Count)
- return sb.String()
-}
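-
-// A minimal sketch of the Boundaries encoding used above (the bucket is
-// hypothetical): 0 means upper-inclusive, 1 lower-inclusive, 2 neither,
-// and 3 both.
-//
-//	b := HistogramBucket{Boundaries: 0, Lower: 0, Upper: 1, Count: 5}
-//	_ = b.String() // "(0,1]:5"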
-
-type HistogramBuckets []*HistogramBucket
-
-func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
- if len(s) != len(o) {
- return false
- }
-
- for i, bucket := range s {
- if !bucket.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
-type SampleHistogram struct {
- Count FloatString `json:"count"`
- Sum FloatString `json:"sum"`
- Buckets HistogramBuckets `json:"buckets"`
-}
-
-func (s SampleHistogram) String() string {
- return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
-}
-
-func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
- return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
-}
-
-type SampleHistogramPair struct {
- Timestamp Time
-	// Histogram should never be nil; it's only stored as a pointer for efficiency.
- Histogram *SampleHistogram
-}
-
-func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
- if s.Histogram == nil {
- return nil, fmt.Errorf("histogram is nil")
- }
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Histogram)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
- tmp := []interface{}{&s.Timestamp, &s.Histogram}
- wantLen := len(tmp)
- if err := json.Unmarshal(buf, &tmp); err != nil {
- return err
- }
- if gotLen := len(tmp); gotLen != wantLen {
- return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
- }
- if s.Histogram == nil {
- return fmt.Errorf("histogram is null")
- }
- return nil
-}
-
-func (s SampleHistogramPair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
-}
-
-func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
- return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
-}
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
deleted file mode 100644
index 726c50ee63..0000000000
--- a/vendor/github.com/prometheus/common/model/value_type.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
-}
-
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
-
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
-}
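-
-// A minimal round-trip sketch (the input is hypothetical):
-//
-//	var vt ValueType
-//	_ = vt.UnmarshalJSON([]byte(`"vector"`)) // vt == ValVector
-//	b, _ := vt.MarshalJSON()                 // b == []byte(`"vector"`)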
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
deleted file mode 100644
index 7cc33ae4a7..0000000000
--- a/vendor/github.com/prometheus/procfs/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/testdata/fixtures/
-/fixtures
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
deleted file mode 100644
index c24864a927..0000000000
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-linters:
- enable:
- - godot
- - misspell
- - revive
-
-linters-settings:
- godot:
- capital: true
- exclude:
- # Ignore "See: URL"
- - 'See:'
- misspell:
- locale: US
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
deleted file mode 100644
index d325872bdf..0000000000
--- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Prometheus Community Code of Conduct
-
-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
deleted file mode 100644
index 853eb9d49b..0000000000
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ /dev/null
@@ -1,121 +0,0 @@
-# Contributing
-
-Prometheus uses GitHub to manage reviews of pull requests.
-
-* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
-
-* If you have a trivial fix or improvement, go ahead and create a pull request,
- addressing (with `@...`) a suitable maintainer of this repository (see
- [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
-
-* If you plan to do something more involved, first discuss your ideas
- on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
- This will avoid unnecessary work and surely give you and us a good deal
- of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
-
-* Relevant coding style guidelines are the [Go Code Review
- Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
- and the _Formatting and style_ section of Peter Bourgon's [Go: Best
- Practices for Production
- Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
-
-* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
-
-## Steps to Contribute
-
-Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue.
-
-Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
-
-For quickly compiling and testing your changes do:
-```
-make test # Make sure all the tests pass before you commit and push :)
-```
-
-We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
-
-## Pull Request Checklist
-
-* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
-
-* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
-
-* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
-
-* Add tests relevant to the fixed bug or new feature.
-
-## Dependency management
-
-The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
-
-All dependencies are vendored in the `vendor/` directory.
-
-To add or update a new dependency, use the `go get` command:
-
-```bash
-# Pick the latest tagged release.
-go get example.com/some/module/pkg
-
-# Pick a specific version.
-go get example.com/some/module/pkg@vX.Y.Z
-```
-
-Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
-
-
-```bash
-# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
-GO111MODULE=on go mod tidy
-
-GO111MODULE=on go mod vendor
-```
-
-You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
-
-
-## API Implementation Guidelines
-
-### Naming and Documentation
-
-Public functions and structs should normally be named according to the file(s) being read and parsed. For example,
-the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function
-should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s).
-
-### Reading vs. Parsing
-
-Most functionality in this library consists of reading files and then parsing the text into structured data. In most
-cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and
-a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested
-directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types
-such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files
-in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
-
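-A minimal sketch of this pattern, using a hypothetical `Thing` type:
-
-```
-// Thing reads and parses /proc/thing.
-func (fs FS) Thing() (Thing, error) {
-	data, err := util.ReadFileNoStat(fs.proc.Path("thing"))
-	if err != nil {
-		return Thing{}, err
-	}
-	return parseThing(bytes.NewReader(data))
-}
-
-// parseThing does the text parsing and can be unit-tested without touching
-// the filesystem.
-func parseThing(r io.Reader) (Thing, error) {
-	var t Thing
-	// ... scan r line by line and fill in t ...
-	return t, nil
-}
-```
-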
-### /proc and /sys filesystem I/O
-
-The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O.
-Many of the files are changing continuously and the data being read can in some cases change between subsequent
-reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
-to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
-full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
-This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
-the file.
-
-Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
-the full file, and then using a scanner on the `[]byte` or `string` containing the data.
-
-```
- data, err := util.ReadFileNoStat("/proc/cpuinfo")
- if err != nil {
- return err
- }
- reader := bytes.NewReader(data)
- scanner := bufio.NewScanner(reader)
-```
-
-The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
-can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
-not bother to check the size of the file before reading.
-```
- data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
-```
-
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
deleted file mode 100644
index e00f3b365b..0000000000
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ /dev/null
@@ -1,3 +0,0 @@
-* Johannes 'fish' Ziemke @discordianfish
-* Paul Gier @pgier
-* Ben Kochie @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
deleted file mode 100644
index 7edfe4d093..0000000000
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2018 The Prometheus Authors
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-include Makefile.common
-
-%/.unpacked: %.ttar
- @echo ">> extracting fixtures $*"
- ./ttar -C $(dir $*) -x -f $*.ttar
- touch $@
-
-fixtures: testdata/fixtures/.unpacked
-
-update_fixtures:
- rm -vf testdata/fixtures/.unpacked
- ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
-
-.PHONY: build
-build:
-
-.PHONY: test
-test: testdata/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
deleted file mode 100644
index 0acfb9d806..0000000000
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright 2018 The Prometheus Authors
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# A common Makefile that includes rules to be reused in different prometheus projects.
-# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
-
-# Example usage:
-# Create the main Makefile in the root project directory.
-# include Makefile.common
-# customTarget:
-# @echo ">> Running customTarget"
-#
-
-# Ensure GOBIN is not set during build so that promu is installed to the correct path
-unexport GOBIN
-
-GO ?= go
-GOFMT ?= $(GO)fmt
-FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
-GOOPTS ?=
-GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
-GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
-
-GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
-PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
-
-PROMU := $(FIRST_GOPATH)/bin/promu
-pkgs = ./...
-
-ifeq (arm, $(GOHOSTARCH))
- GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
- GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
-else
- GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
-endif
-
-GOTEST := $(GO) test
-GOTEST_DIR :=
-ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum 2> /dev/null),)
- GOTEST_DIR := test-results
- GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
-endif
-endif
-
-PROMU_VERSION ?= 0.15.0
-PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
-
-SKIP_GOLANGCI_LINT :=
-GOLANGCI_LINT :=
-GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.56.2
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
-# windows isn't included here because of the path separator being different.
-ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
- ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
- # If we're in CI and there is an Actions file, that means the linter
- # is being run in Actions, so we don't need to run it here.
- ifneq (,$(SKIP_GOLANGCI_LINT))
- GOLANGCI_LINT :=
- else ifeq (,$(CIRCLE_JOB))
- GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
- else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
- GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
- endif
- endif
-endif
-
-PREFIX ?= $(shell pwd)
-BIN_DIR ?= $(shell pwd)
-DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
-DOCKERFILE_PATH ?= ./Dockerfile
-DOCKERBUILD_CONTEXT ?= ./
-DOCKER_REPO ?= prom
-
-DOCKER_ARCHS ?= amd64
-
-BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
-PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
-TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
-
-SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
-
-ifeq ($(GOHOSTARCH),amd64)
- ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
- # Only supported on amd64
- test-flags := -race
- endif
-endif
-
-# This rule is used to forward a target like "build" to "common-build". This
-# allows a new "build" target to be defined in a Makefile which includes this
-# one and override "common-build" without override warnings.
-%: common-% ;
-
-.PHONY: common-all
-common-all: precheck style check_license lint yamllint unused build test
-
-.PHONY: common-style
-common-style:
- @echo ">> checking code style"
- @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
- if [ -n "$${fmtRes}" ]; then \
- echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
- echo "Please ensure you are using $$($(GO) version) for formatting code."; \
- exit 1; \
- fi
-
-.PHONY: common-check_license
-common-check_license:
- @echo ">> checking license header"
- @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
- awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
- done); \
- if [ -n "$${licRes}" ]; then \
- echo "license header checking failed:"; echo "$${licRes}"; \
- exit 1; \
- fi
-
-.PHONY: common-deps
-common-deps:
- @echo ">> getting dependencies"
- $(GO) mod download
-
-.PHONY: update-go-deps
-update-go-deps:
- @echo ">> updating Go dependencies"
- @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
- $(GO) get -d $$m; \
- done
- $(GO) mod tidy
-
-.PHONY: common-test-short
-common-test-short: $(GOTEST_DIR)
- @echo ">> running short tests"
- $(GOTEST) -short $(GOOPTS) $(pkgs)
-
-.PHONY: common-test
-common-test: $(GOTEST_DIR)
- @echo ">> running all tests"
- $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
-
-$(GOTEST_DIR):
- @mkdir -p $@
-
-.PHONY: common-format
-common-format:
- @echo ">> formatting code"
- $(GO) fmt $(pkgs)
-
-.PHONY: common-vet
-common-vet:
- @echo ">> vetting code"
- $(GO) vet $(GOOPTS) $(pkgs)
-
-.PHONY: common-lint
-common-lint: $(GOLANGCI_LINT)
-ifdef GOLANGCI_LINT
- @echo ">> running golangci-lint"
- $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-endif
-
-.PHONY: common-lint-fix
-common-lint-fix: $(GOLANGCI_LINT)
-ifdef GOLANGCI_LINT
- @echo ">> running golangci-lint fix"
- $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
-endif
-
-.PHONY: common-yamllint
-common-yamllint:
- @echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint 2> /dev/null))
- @echo "yamllint not installed so skipping"
-else
- yamllint .
-endif
-
-# For backward-compatibility.
-.PHONY: common-staticcheck
-common-staticcheck: lint
-
-.PHONY: common-unused
-common-unused:
- @echo ">> running check for unused/missing packages in go.mod"
- $(GO) mod tidy
- @git diff --exit-code -- go.sum go.mod
-
-.PHONY: common-build
-common-build: promu
- @echo ">> building binaries"
- $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
-
-.PHONY: common-tarball
-common-tarball: promu
- @echo ">> building release tarball"
- $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
-
-.PHONY: common-docker-repo-name
-common-docker-repo-name:
- @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
-
-.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
-common-docker: $(BUILD_DOCKER_ARCHS)
-$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
- -f $(DOCKERFILE_PATH) \
- --build-arg ARCH="$*" \
- --build-arg OS="linux" \
- $(DOCKERBUILD_CONTEXT)
-
-.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
-common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
-$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
-
-DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
-.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
-common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
-$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
-
-.PHONY: common-docker-manifest
-common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
-
-.PHONY: promu
-promu: $(PROMU)
-
-$(PROMU):
- $(eval PROMU_TMP := $(shell mktemp -d))
- curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
- mkdir -p $(FIRST_GOPATH)/bin
- cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
- rm -r $(PROMU_TMP)
-
-.PHONY: proto
-proto:
- @echo ">> generating code from proto files"
- @./scripts/genproto.sh
-
-ifdef GOLANGCI_LINT
-$(GOLANGCI_LINT):
- mkdir -p $(FIRST_GOPATH)/bin
- curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
- | sed -e '/install -d/d' \
- | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
-endif
-
-.PHONY: precheck
-precheck::
-
-define PRECHECK_COMMAND_template =
-precheck:: $(1)_precheck
-
-PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
-.PHONY: $(1)_precheck
-$(1)_precheck:
- @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
- echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
- exit 1; \
- fi
-endef
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
deleted file mode 100644
index 53c5e9aa11..0000000000
--- a/vendor/github.com/prometheus/procfs/NOTICE
+++ /dev/null
@@ -1,7 +0,0 @@
-procfs provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
-
-Copyright 2014-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
deleted file mode 100644
index 1224816c2a..0000000000
--- a/vendor/github.com/prometheus/procfs/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# procfs
-
-This package provides functions to retrieve system, kernel, and process
-metrics from the pseudo-filesystems /proc and /sys.
-
-*WARNING*: This package is a work in progress. Its API may still break in
-backwards-incompatible ways without warnings. Use it at your own risk.
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs)
-[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master)
-[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
-
-## Usage
-
-The procfs library is organized by packages based on whether the gathered data is coming from
-/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
-/sys, or both. For example, CPU statistics are gathered from
-`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
-point is initialized, and then the stat information is read.
-
-```go
-fs, err := procfs.NewFS("/proc")
-stats, err := fs.Stat()
-```
-
-Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems.
-
-```go
- fs, err := blockdevice.NewFS("/proc", "/sys")
- stats, err := fs.ProcDiskstats()
-```
-
-## Package Organization
-
-The packages in this project are organized according to (1) whether the data comes from the `/proc` or
-`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
-can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
-is available in the `blockdevice` sub-package.
-
-## Building and Testing
-
-The procfs library is intended to be built as part of another application, so there are no distributable binaries.
-However, most of the API includes unit tests which can be run with `make test`.
-
-### Updating Test Fixtures
-
-The procfs library includes a set of test fixtures which include many example files from
-the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
-which is extracted automatically during testing. To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures` or just `make test`.
-
-```bash
-rm -rf testdata/fixtures
-make test
-```
-
-Next, make the required changes to the extracted files in the `fixtures` directory. When
-the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
-based on the updated `fixtures` directory. Finally, verify the changes using
-`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
deleted file mode 100644
index fed02d85c7..0000000000
--- a/vendor/github.com/prometheus/procfs/SECURITY.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Reporting a security issue
-
-The Prometheus security policy, including how to report vulnerabilities, can be
-found here:
-
-<https://prometheus.io/security/>
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
deleted file mode 100644
index cdcc8a7ccc..0000000000
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-// Learned from include/uapi/linux/if_arp.h.
-const (
- // completed entry (ha valid).
- ATFComplete = 0x02
- // permanent entry.
- ATFPermanent = 0x04
- // Publish entry.
- ATFPublish = 0x08
- // Has requested trailers.
- ATFUseTrailers = 0x10
- // Obsoleted: Want to use a netmask (only for proxy entries).
- ATFNetmask = 0x20
-	// Don't answer this address.
- ATFDontPublish = 0x40
-)
-
-// ARPEntry contains a single row of the columnar data represented in
-// /proc/net/arp.
-type ARPEntry struct {
- // IP address
- IPAddr net.IP
- // MAC address
- HWAddr net.HardwareAddr
- // Name of the device
- Device string
- // Flags
- Flags byte
-}
-
-// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
-// and then returns a slice of ARPEntry values.
-func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
- data, err := os.ReadFile(fs.proc.Path("net/arp"))
- if err != nil {
- return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
- }
-
- return parseARPEntries(data)
-}
-
-func parseARPEntries(data []byte) ([]ARPEntry, error) {
- lines := strings.Split(string(data), "\n")
- entries := make([]ARPEntry, 0)
- var err error
- const (
- expectedDataWidth = 6
- expectedHeaderWidth = 9
- )
- for _, line := range lines {
- columns := strings.Fields(line)
- width := len(columns)
-
- if width == expectedHeaderWidth || width == 0 {
- continue
- } else if width == expectedDataWidth {
- entry, err := parseARPEntry(columns)
- if err != nil {
- return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
- }
- entries = append(entries, entry)
- } else {
-			return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d", ErrFileParse, width, expectedDataWidth)
- }
-
- }
-
- return entries, err
-}
-
-func parseARPEntry(columns []string) (ARPEntry, error) {
- entry := ARPEntry{Device: columns[5]}
- ip := net.ParseIP(columns[0])
- entry.IPAddr = ip
-
- if mac, err := net.ParseMAC(columns[3]); err == nil {
- entry.HWAddr = mac
- } else {
- return ARPEntry{}, err
- }
-
- if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
- entry.Flags = byte(flags)
- } else {
- return ARPEntry{}, err
- }
-
- return entry, nil
-}
-
-// IsComplete returns true if the ARP entry is marked with the complete flag.
-func (entry *ARPEntry) IsComplete() bool {
- return entry.Flags&ATFComplete != 0
-}
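-
-// A minimal usage sketch (the /proc mount point is the conventional default):
-//
-//	fs, _ := NewFS("/proc")
-//	entries, _ := fs.GatherARPEntries()
-//	for _, e := range entries {
-//		if e.IsComplete() {
-//			// e.IPAddr resolved to e.HWAddr on e.Device
-//		}
-//	}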
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
deleted file mode 100644
index eb88d78aab..0000000000
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-// A BuddyInfo is the details parsed from /proc/buddyinfo.
-// The data is composed of an array of counts of free fragments of each size.
-// The sizes are 2^n*PAGE_SIZE, where n is the array index.
-type BuddyInfo struct {
- Node string
- Zone string
- Sizes []float64
-}
-
-// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
-func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
- file, err := os.Open(fs.proc.Path("buddyinfo"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- return parseBuddyInfo(file)
-}
-
-func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
- var (
- buddyInfo = []BuddyInfo{}
- scanner = bufio.NewScanner(r)
- bucketCount = -1
- )
-
- for scanner.Scan() {
- var err error
- line := scanner.Text()
- parts := strings.Fields(line)
-
- if len(parts) < 4 {
- return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
- }
-
- node := strings.TrimRight(parts[1], ",")
- zone := strings.TrimRight(parts[3], ",")
- arraySize := len(parts[4:])
-
- if bucketCount == -1 {
- bucketCount = arraySize
- } else {
- if bucketCount != arraySize {
- return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
- }
- }
-
- sizes := make([]float64, arraySize)
- for i := 0; i < arraySize; i++ {
- sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
- if err != nil {
- return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
- }
- }
-
- buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
- }
-
- return buddyInfo, scanner.Err()
-}
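A short illustrative sketch of consuming BuddyInfo (hypothetical caller, not part of this diff). Sizes[n] counts free blocks of 2^n pages in that node/zone, so external fragmentation shows up as zeros at high orders:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	infos, err := fs.BuddyInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, bi := range infos {
		for order, count := range bi.Sizes {
			// A free block at this order spans 2^order contiguous pages.
			fmt.Printf("node %s zone %-8s order %2d: %.0f free blocks\n",
				bi.Node, bi.Zone, order, count)
		}
	}
}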
diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go
deleted file mode 100644
index bf4f3b48c0..0000000000
--- a/vendor/github.com/prometheus/procfs/cmdline.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CmdLine returns the command line of the kernel.
-func (fs FS) CmdLine() ([]string, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
- if err != nil {
- return nil, err
- }
-
- return strings.Fields(string(data)), nil
-}
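Usage was a one-liner; a hypothetical caller for illustration (the kernel command line comes back pre-split into whitespace-separated fields):

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	args, err := fs.CmdLine()
	if err != nil {
		log.Fatal(err)
	}
	// CmdLine pre-splits /proc/cmdline into whitespace-separated fields.
	fmt.Println(strings.Join(args, " "))
}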
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
deleted file mode 100644
index f0950bb495..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ /dev/null
@@ -1,519 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
-type CPUInfo struct {
- Processor uint
- VendorID string
- CPUFamily string
- Model string
- ModelName string
- Stepping string
- Microcode string
- CPUMHz float64
- CacheSize string
- PhysicalID string
- Siblings uint
- CoreID string
- CPUCores uint
- APICID string
- InitialAPICID string
- FPU string
- FPUException string
- CPUIDLevel uint
- WP string
- Flags []string
- Bugs []string
- BogoMips float64
- CLFlushSize uint
- CacheAlignment uint
- AddressSizes string
- PowerManagement string
-}
-
-var (
- cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
- cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
-)
-
-// CPUInfo returns information about current system CPUs.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) CPUInfo() ([]CPUInfo, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
- if err != nil {
- return nil, err
- }
- return parseCPUInfo(data)
-}
-
-func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- // find the first "processor" line
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "vendor", "vendor_id":
- cpuinfo[i].VendorID = field[1]
- case "cpu family":
- cpuinfo[i].CPUFamily = field[1]
- case "model":
- cpuinfo[i].Model = field[1]
- case "model name":
- cpuinfo[i].ModelName = field[1]
- case "stepping":
- cpuinfo[i].Stepping = field[1]
- case "microcode":
- cpuinfo[i].Microcode = field[1]
- case "cpu MHz":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- case "cache size":
- cpuinfo[i].CacheSize = field[1]
- case "physical id":
- cpuinfo[i].PhysicalID = field[1]
- case "siblings":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Siblings = uint(v)
- case "core id":
- cpuinfo[i].CoreID = field[1]
- case "cpu cores":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUCores = uint(v)
- case "apicid":
- cpuinfo[i].APICID = field[1]
- case "initial apicid":
- cpuinfo[i].InitialAPICID = field[1]
- case "fpu":
- cpuinfo[i].FPU = field[1]
- case "fpu_exception":
- cpuinfo[i].FPUException = field[1]
- case "cpuid level":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUIDLevel = uint(v)
- case "wp":
- cpuinfo[i].WP = field[1]
- case "flags":
- cpuinfo[i].Flags = strings.Fields(field[1])
- case "bugs":
- cpuinfo[i].Bugs = strings.Fields(field[1])
- case "bogomips":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- case "clflush size":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CLFlushSize = uint(v)
- case "cache_alignment":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CacheAlignment = uint(v)
- case "address sizes":
- cpuinfo[i].AddressSizes = field[1]
- case "power management":
- cpuinfo[i].PowerManagement = field[1]
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
- }
- if !match || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- featuresLine := ""
- commonCPUInfo := CPUInfo{}
- i := 0
- if strings.TrimSpace(field[0]) == "Processor" {
- commonCPUInfo = CPUInfo{ModelName: field[1]}
- i = -1
- } else {
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo = []CPUInfo{firstcpu}
- }
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "BogoMIPS":
- if i == -1 {
- cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
- i++
- cpuinfo[i].Processor = 0
- }
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- case "Features":
- featuresLine = line
- case "model name":
- cpuinfo[i].ModelName = field[1]
- }
- }
- fields := strings.SplitN(featuresLine, ": ", 2)
- // Guard against an absent "Features" line, which would otherwise panic.
- if len(fields) == 2 {
- for i := range cpuinfo {
- cpuinfo[i].Flags = strings.Fields(fields[1])
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- commonCPUInfo := CPUInfo{VendorID: field[1]}
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "bogomips per cpu":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- commonCPUInfo.BogoMips = v
- case "features":
- commonCPUInfo.Flags = strings.Fields(field[1])
- }
- if strings.HasPrefix(line, "processor") {
- match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
- if len(match) < 2 {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- cpu := commonCPUInfo
- v, err := strconv.ParseUint(match[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpu.Processor = uint(v)
- cpuinfo = append(cpuinfo, cpu)
- }
- if strings.HasPrefix(line, "cpu number") {
- break
- }
- }
-
- i := 0
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "cpu number":
- i++
- case "cpu MHz dynamic":
- clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
- v, err := strconv.ParseFloat(clock, 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- case "physical id":
- cpuinfo[i].PhysicalID = field[1]
- case "core id":
- cpuinfo[i].CoreID = field[1]
- case "cpu cores":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUCores = uint(v)
- case "siblings":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Siblings = uint(v)
- }
- }
-
- return cpuinfo, nil
-}
-
-func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- // find the first "processor" line
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- systemType := field[1]
-
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- i = int(v)
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- cpuinfo[i].Processor = uint(v)
- cpuinfo[i].VendorID = systemType
- case "cpu model":
- cpuinfo[i].ModelName = field[1]
- case "BogoMIPS":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
- // find the first "processor" line
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- cpuinfo := []CPUInfo{}
- systemType := field[1]
- i := 0
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- i = int(v)
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- cpuinfo[i].Processor = uint(v)
- cpuinfo[i].VendorID = systemType
- case "CPU Family":
- cpuinfo[i].CPUFamily = field[1]
- case "Model Name":
- cpuinfo[i].ModelName = field[1]
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "cpu":
- cpuinfo[i].VendorID = field[1]
- case "clock":
- clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
- v, err := strconv.ParseFloat(clock, 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
- scanner := bufio.NewScanner(bytes.NewReader(info))
-
- firstLine := firstNonEmptyLine(scanner)
- if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
- }
- field := strings.SplitN(firstLine, ": ", 2)
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- firstcpu := CPUInfo{Processor: uint(v)}
- cpuinfo := []CPUInfo{firstcpu}
- i := 0
-
- for scanner.Scan() {
- line := scanner.Text()
- if !strings.Contains(line, ":") {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- i = int(v)
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- cpuinfo[i].Processor = uint(v)
- case "hart":
- cpuinfo[i].CoreID = field[1]
- case "isa":
- cpuinfo[i].ModelName = field[1]
- }
- }
- return cpuinfo, nil
-}
-
-func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
- return nil, errors.New("not implemented")
-}
-
-// firstNonEmptyLine advances the scanner to the first non-empty line
-// and returns the contents of that line.
-func firstNonEmptyLine(scanner *bufio.Scanner) string {
- for scanner.Scan() {
- line := scanner.Text()
- if strings.TrimSpace(line) != "" {
- return line
- }
- }
- return ""
-}
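An illustrative caller for the removed parser (hypothetical; note CPUInfo is linux-only per the build tags above, and the per-architecture stub files below select which parser backs it):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	cpus, err := fs.CPUInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cpus {
		// Which fields are populated depends on the architecture-specific parser.
		fmt.Printf("cpu%d: %s @ %.2f MHz\n", c.Processor, c.ModelName, c.CPUMHz)
	}
}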
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
deleted file mode 100644
index 64cfd534c1..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (arm || arm64)
-// +build linux
-// +build arm arm64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoARM
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
deleted file mode 100644
index d88442f0ed..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
deleted file mode 100644
index c11207f3ab..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (mips || mipsle || mips64 || mips64le)
-// +build linux
-// +build mips mipsle mips64 mips64le
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
deleted file mode 100644
index a6b2b3127c..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
-// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoDummy
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
deleted file mode 100644
index 003bc2ad4a..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (ppc64 || ppc64le)
-// +build linux
-// +build ppc64 ppc64le
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoPPC
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
deleted file mode 100644
index 1c9b7313b6..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (riscv || riscv64)
-// +build linux
-// +build riscv riscv64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoRISCV
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
deleted file mode 100644
index fa3686bc00..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoS390X
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
deleted file mode 100644
index a0ef55562e..0000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && (386 || amd64)
-// +build linux
-// +build 386 amd64
-
-package procfs
-
-var parseCPUInfo = parseCPUInfoX86
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
deleted file mode 100644
index 5f2a37a78b..0000000000
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Crypto holds info parsed from /proc/crypto.
-type Crypto struct {
- Alignmask *uint64
- Async bool
- Blocksize *uint64
- Chunksize *uint64
- Ctxsize *uint64
- Digestsize *uint64
- Driver string
- Geniv string
- Internal string
- Ivsize *uint64
- Maxauthsize *uint64
- MaxKeysize *uint64
- MinKeysize *uint64
- Module string
- Name string
- Priority *int64
- Refcnt *int64
- Seedsize *uint64
- Selftest string
- Type string
- Walksize *uint64
-}
-
-// Crypto parses a crypto file (/proc/crypto) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
-func (fs FS) Crypto() ([]Crypto, error) {
- path := fs.proc.Path("crypto")
- b, err := util.ReadFileNoStat(path)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
-
- }
-
- crypto, err := parseCrypto(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
- }
-
- return crypto, nil
-}
-
-// parseCrypto parses a /proc/crypto stream into Crypto elements.
-func parseCrypto(r io.Reader) ([]Crypto, error) {
- var out []Crypto
-
- s := bufio.NewScanner(r)
- for s.Scan() {
- text := s.Text()
- switch {
- case strings.HasPrefix(text, "name"):
- // Each crypto element begins with its name.
- out = append(out, Crypto{})
- case text == "":
- continue
- }
-
- kv := strings.Split(text, ":")
- if len(kv) != 2 {
- return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
- }
-
- k := strings.TrimSpace(kv[0])
- v := strings.TrimSpace(kv[1])
-
- // Parse the key/value pair into the currently focused element.
- c := &out[len(out)-1]
- if err := c.parseKV(k, v); err != nil {
- return nil, err
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- return out, nil
-}
-
-// parseKV parses a key/value pair into the appropriate field of c.
-func (c *Crypto) parseKV(k, v string) error {
- vp := util.NewValueParser(v)
-
- switch k {
- case "async":
- // Interpret literal yes as true.
- c.Async = v == "yes"
- case "blocksize":
- c.Blocksize = vp.PUInt64()
- case "chunksize":
- c.Chunksize = vp.PUInt64()
- case "digestsize":
- c.Digestsize = vp.PUInt64()
- case "driver":
- c.Driver = v
- case "geniv":
- c.Geniv = v
- case "internal":
- c.Internal = v
- case "ivsize":
- c.Ivsize = vp.PUInt64()
- case "maxauthsize":
- c.Maxauthsize = vp.PUInt64()
- case "max keysize":
- c.MaxKeysize = vp.PUInt64()
- case "min keysize":
- c.MinKeysize = vp.PUInt64()
- case "module":
- c.Module = v
- case "name":
- c.Name = v
- case "priority":
- c.Priority = vp.PInt64()
- case "refcnt":
- c.Refcnt = vp.PInt64()
- case "seedsize":
- c.Seedsize = vp.PUInt64()
- case "selftest":
- c.Selftest = v
- case "type":
- c.Type = v
- case "walksize":
- c.Walksize = vp.PUInt64()
- }
-
- return vp.Err()
-}
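Sketch of a hypothetical consumer; the numeric fields are pointers because /proc/crypto omits them for some algorithm types, so they must be nil-checked:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	algos, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range algos {
		prio := int64(0)
		if c.Priority != nil { // pointer fields are nil when absent from /proc/crypto
			prio = *c.Priority
		}
		fmt.Printf("%-20s driver=%-30s type=%-12s priority=%d\n", c.Name, c.Driver, c.Type, prio)
	}
}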
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
deleted file mode 100644
index f9d961e441..0000000000
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2014 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package procfs provides functions to retrieve system, kernel and process
-// metrics from the pseudo-filesystem proc.
-//
-// Example:
-//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.Stat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
deleted file mode 100644
index 4980c875bf..0000000000
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "github.com/prometheus/procfs/internal/fs"
-)
-
-// FS represents the pseudo-filesystem proc, which provides an interface to
-// kernel data structures.
-type FS struct {
- proc fs.FS
- isReal bool
-}
-
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
-
-// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
-// It will error if the mount point directory can't be read or is a file.
-func NewDefaultFS() (FS, error) {
- return NewFS(DefaultMountPoint)
-}
-
-// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
-// if the mount point directory can't be read or is a file.
-func NewFS(mountPoint string) (FS, error) {
- fs, err := fs.NewFS(mountPoint)
- if err != nil {
- return FS{}, err
- }
-
- isReal, err := isRealProc(mountPoint)
- if err != nil {
- return FS{}, err
- }
-
- return FS{fs, isReal}, nil
-}
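The non-default constructor matters mainly for containerized collectors; a minimal sketch, assuming (hypothetically) the host's /proc is bind-mounted at /host/proc:

package main

import (
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Hypothetical path: the host's /proc bind-mounted into a container.
	fs, err := procfs.NewFS("/host/proc")
	if err != nil {
		log.Fatal(err)
	}
	_ = fs // used exactly like a NewDefaultFS handle, e.g. fs.CmdLine()
}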
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
deleted file mode 100644
index 134767d69a..0000000000
--- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !freebsd && !linux
-// +build !freebsd,!linux
-
-package procfs
-
-// isRealProc always returns true on platforms whose Statfs_t struct has no
-// Type field, where the proc magic-number check cannot be performed.
-func isRealProc(mountPoint string) (bool, error) {
- return true, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
deleted file mode 100644
index 80df79c319..0000000000
--- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build freebsd || linux
-// +build freebsd linux
-
-package procfs
-
-import (
- "syscall"
-)
-
-// isRealProc determines whether supplied mountpoint is really a proc filesystem.
-func isRealProc(mountPoint string) (bool, error) {
- stat := syscall.Statfs_t{}
- err := syscall.Statfs(mountPoint, &stat)
- if err != nil {
- return false, err
- }
-
- // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
- return stat.Type == 0x9fa0, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
deleted file mode 100644
index cf2e3eaa03..0000000000
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Fscacheinfo represents fscache statistics.
-type Fscacheinfo struct {
- // Number of index cookies allocated
- IndexCookiesAllocated uint64
- // Number of data storage cookies allocated
- DataStorageCookiesAllocated uint64
- // Number of special cookies allocated
- SpecialCookiesAllocated uint64
- // Number of objects allocated
- ObjectsAllocated uint64
- // Number of object allocation failures
- ObjectAllocationsFailure uint64
- // Number of objects that reached the available state
- ObjectsAvailable uint64
- // Number of objects that reached the dead state
- ObjectsDead uint64
- // Number of objects that didn't have a coherency check
- ObjectsWithoutCoherencyCheck uint64
- // Number of objects that passed a coherency check
- ObjectsWithCoherencyCheck uint64
- // Number of objects that needed a coherency data update
- ObjectsNeedCoherencyCheckUpdate uint64
- // Number of objects that were declared obsolete
- ObjectsDeclaredObsolete uint64
- // Number of pages marked as being cached
- PagesMarkedAsBeingCached uint64
- // Number of uncache page requests seen
- UncachePagesRequestSeen uint64
- // Number of acquire cookie requests seen
- AcquireCookiesRequestSeen uint64
- // Number of acq reqs given a NULL parent
- AcquireRequestsWithNullParent uint64
- // Number of acq reqs rejected due to no cache available
- AcquireRequestsRejectedNoCacheAvailable uint64
- // Number of acq reqs succeeded
- AcquireRequestsSucceeded uint64
- // Number of acq reqs rejected due to error
- AcquireRequestsRejectedDueToError uint64
- // Number of acq reqs failed on ENOMEM
- AcquireRequestsFailedDueToEnomem uint64
- // Number of lookup calls made on cache backends
- LookupsNumber uint64
- // Number of negative lookups made
- LookupsNegative uint64
- // Number of positive lookups made
- LookupsPositive uint64
- // Number of objects created by lookup
- ObjectsCreatedByLookup uint64
- // Number of lookups timed out and requeued
- LookupsTimedOutAndRequed uint64
- InvalidationsNumber uint64
- InvalidationsRunning uint64
- // Number of update cookie requests seen
- UpdateCookieRequestSeen uint64
- // Number of upd reqs given a NULL parent
- UpdateRequestsWithNullParent uint64
- // Number of upd reqs granted CPU time
- UpdateRequestsRunning uint64
- // Number of relinquish cookie requests seen
- RelinquishCookiesRequestSeen uint64
- // Number of rlq reqs given a NULL parent
- RelinquishCookiesWithNullParent uint64
- // Number of rlq reqs waited on completion of creation
- RelinquishRequestsWaitingCompleteCreation uint64
- // Number of rlq reqs that needed to be retried
- RelinquishRetries uint64
- // Number of attribute changed requests seen
- AttributeChangedRequestsSeen uint64
- // Number of attr changed requests queued
- AttributeChangedRequestsQueued uint64
- // Number of attr changed rejected -ENOBUFS
- AttributeChangedRejectDueToEnobufs uint64
- // Number of attr changed failed -ENOMEM
- AttributeChangedFailedDueToEnomem uint64
- // Number of attr changed ops given CPU time
- AttributeChangedOps uint64
- // Number of allocation requests seen
- AllocationRequestsSeen uint64
- // Number of successful alloc reqs
- AllocationOkRequests uint64
- // Number of alloc reqs that waited on lookup completion
- AllocationWaitingOnLookup uint64
- // Number of alloc reqs rejected -ENOBUFS
- AllocationsRejectedDueToEnobufs uint64
- // Number of alloc reqs aborted -ERESTARTSYS
- AllocationsAbortedDueToErestartsys uint64
- // Number of alloc reqs submitted
- AllocationOperationsSubmitted uint64
- // Number of alloc reqs waited for CPU time
- AllocationsWaitedForCPU uint64
- // Number of alloc reqs aborted due to object death
- AllocationsAbortedDueToObjectDeath uint64
- // Number of retrieval (read) requests seen
- RetrievalsReadRequests uint64
- // Number of successful retr reqs
- RetrievalsOk uint64
- // Number of retr reqs that waited on lookup completion
- RetrievalsWaitingLookupCompletion uint64
- // Number of retr reqs returned -ENODATA
- RetrievalsReturnedEnodata uint64
- // Number of retr reqs rejected -ENOBUFS
- RetrievalsRejectedDueToEnobufs uint64
- // Number of retr reqs aborted -ERESTARTSYS
- RetrievalsAbortedDueToErestartsys uint64
- // Number of retr reqs failed -ENOMEM
- RetrievalsFailedDueToEnomem uint64
- // Number of retr reqs submitted
- RetrievalsRequests uint64
- // Number of retr reqs waited for CPU time
- RetrievalsWaitingCPU uint64
- // Number of retr reqs aborted due to object death
- RetrievalsAbortedDueToObjectDeath uint64
- // Number of storage (write) requests seen
- StoreWriteRequests uint64
- // Number of successful store reqs
- StoreSuccessfulRequests uint64
- // Number of store reqs on a page already pending storage
- StoreRequestsOnPendingStorage uint64
- // Number of store reqs rejected -ENOBUFS
- StoreRequestsRejectedDueToEnobufs uint64
- // Number of store reqs failed -ENOMEM
- StoreRequestsFailedDueToEnomem uint64
- // Number of store reqs submitted
- StoreRequestsSubmitted uint64
- // Number of store reqs granted CPU time
- StoreRequestsRunning uint64
- // Number of pages given store req processing time
- StorePagesWithRequestsProcessing uint64
- // Number of store reqs deleted from tracking tree
- StoreRequestsDeleted uint64
- // Number of store reqs over store limit
- StoreRequestsOverStoreLimit uint64
- // Number of release reqs against pages with no pending store
- ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
- // Number of release reqs against pages stored by time lock granted
- ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
- // Number of release reqs ignored due to in-progress store
- ReleaseRequestsIgnoredDueToInProgressStore uint64
- // Number of page stores cancelled due to release req
- PageStoresCancelledByReleaseRequests uint64
- VmscanWaiting uint64
- // Number of times async ops added to pending queues
- OpsPending uint64
- // Number of times async ops given CPU time
- OpsRunning uint64
- // Number of times async ops queued for processing
- OpsEnqueued uint64
- // Number of async ops cancelled
- OpsCancelled uint64
- // Number of async ops rejected due to object lookup/create failure
- OpsRejected uint64
- // Number of async ops initialised
- OpsInitialised uint64
- // Number of async ops queued for deferred release
- OpsDeferred uint64
- // Number of async ops released (should equal ini=N when idle)
- OpsReleased uint64
- // Number of deferred-release async ops garbage collected
- OpsGarbageCollected uint64
- // Number of in-progress alloc_object() cache ops
- CacheopAllocationsinProgress uint64
- // Number of in-progress lookup_object() cache ops
- CacheopLookupObjectInProgress uint64
- // Number of in-progress lookup_complete() cache ops
- CacheopLookupCompleteInPorgress uint64
- // Number of in-progress grab_object() cache ops
- CacheopGrabObjectInProgress uint64
- CacheopInvalidations uint64
- // Number of in-progress update_object() cache ops
- CacheopUpdateObjectInProgress uint64
- // Number of in-progress drop_object() cache ops
- CacheopDropObjectInProgress uint64
- // Number of in-progress put_object() cache ops
- CacheopPutObjectInProgress uint64
- // Number of in-progress attr_changed() cache ops
- CacheopAttributeChangeInProgress uint64
- // Number of in-progress sync_cache() cache ops
- CacheopSyncCacheInProgress uint64
- // Number of in-progress read_or_alloc_page() cache ops
- CacheopReadOrAllocPageInProgress uint64
- // Number of in-progress read_or_alloc_pages() cache ops
- CacheopReadOrAllocPagesInProgress uint64
- // Number of in-progress allocate_page() cache ops
- CacheopAllocatePageInProgress uint64
- // Number of in-progress allocate_pages() cache ops
- CacheopAllocatePagesInProgress uint64
- // Number of in-progress write_page() cache ops
- CacheopWritePagesInProgress uint64
- // Number of in-progress uncache_page() cache ops
- CacheopUncachePagesInProgress uint64
- // Number of in-progress dissociate_pages() cache ops
- CacheopDissociatePagesInProgress uint64
- // Number of object lookups/creations rejected due to lack of space
- CacheevLookupsAndCreationsRejectedLackSpace uint64
- // Number of stale objects deleted
- CacheevStaleObjectsDeleted uint64
- // Number of objects retired when relinquished
- CacheevRetiredWhenReliquished uint64
- // Number of objects culled
- CacheevObjectsCulled uint64
-}
-
-// Fscacheinfo returns information about current fscache statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
-func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
- if err != nil {
- return Fscacheinfo{}, err
- }
-
- m, err := parseFscacheinfo(bytes.NewReader(b))
- if err != nil {
- return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
- }
-
- return *m, nil
-}
-
-func setFSCacheFields(fields []string, setFields ...*uint64) error {
- var err error
- if len(fields) < len(setFields) {
- return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
- }
-
- for i := range setFields {
- *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
- var m Fscacheinfo
- s := bufio.NewScanner(r)
- for s.Scan() {
- fields := strings.Fields(s.Text())
- if len(fields) < 2 {
- return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
- }
-
- switch fields[0] {
- case "Cookies:":
- err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
- &m.SpecialCookiesAllocated)
- if err != nil {
- return &m, err
- }
- case "Objects:":
- err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
- &m.ObjectsAvailable, &m.ObjectsDead)
- if err != nil {
- return &m, err
- }
- case "ChkAux":
- err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
- &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
- if err != nil {
- return &m, err
- }
- case "Pages":
- err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
- if err != nil {
- return &m, err
- }
- case "Acquire:":
- err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
- &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
- &m.AcquireRequestsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- case "Lookups:":
- err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
- &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
- if err != nil {
- return &m, err
- }
- case "Invals":
- err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
- if err != nil {
- return &m, err
- }
- case "Updates:":
- err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
- &m.UpdateRequestsRunning)
- if err != nil {
- return &m, err
- }
- case "Relinqs:":
- err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
- &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
- if err != nil {
- return &m, err
- }
- case "AttrChg:":
- err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
- &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
- if err != nil {
- return &m, err
- }
- case "Allocs":
- if strings.Split(fields[2], "=")[0] == "n" {
- err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
- &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
- &m.AllocationsAbortedDueToObjectDeath)
- if err != nil {
- return &m, err
- }
- }
- case "Retrvls:":
- if strings.Split(fields[1], "=")[0] == "n" {
- err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
- &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
- &m.RetrievalsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
- if err != nil {
- return &m, err
- }
- }
- case "Stores":
- if strings.Split(fields[2], "=")[0] == "n" {
- err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
- &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
- &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
- if err != nil {
- return &m, err
- }
- }
- case "VmScan":
- err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
- &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
- &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
- if err != nil {
- return &m, err
- }
- case "Ops":
- if strings.Split(fields[2], "=")[0] == "pend" {
- err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
- if err != nil {
- return &m, err
- }
- }
- case "CacheOp:":
- if strings.Split(fields[1], "=")[0] == "alo" {
- err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
- &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
- if err != nil {
- return &m, err
- }
- } else if strings.Split(fields[1], "=")[0] == "inv" {
- err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
- &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
- &m.CacheopSyncCacheInProgress)
- if err != nil {
- return &m, err
- }
- } else {
- err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
- &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
- &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
- if err != nil {
- return &m, err
- }
- }
- case "CacheEv:":
- err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
- &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
- if err != nil {
- return &m, err
- }
- }
- }
-
- return &m, nil
-}
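Hypothetical consumer sketch; the Fscacheinfo struct is flat, so reading individual counters is direct:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	info, err := fs.Fscacheinfo()
	if err != nil {
		log.Fatal(err) // fails if /proc/fs/fscache/stats is absent
	}
	fmt.Printf("index cookies: %d, objects allocated: %d, lookups: %d\n",
		info.IndexCookiesAllocated, info.ObjectsAllocated, info.LookupsNumber)
}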
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
deleted file mode 100644
index 3c18c7610e..0000000000
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fs
-
-import (
- "fmt"
- "os"
- "path/filepath"
-)
-
-const (
- // DefaultProcMountPoint is the common mount point of the proc filesystem.
- DefaultProcMountPoint = "/proc"
-
- // DefaultSysMountPoint is the common mount point of the sys filesystem.
- DefaultSysMountPoint = "/sys"
-
- // DefaultConfigfsMountPoint is the common mount point of the configfs.
- DefaultConfigfsMountPoint = "/sys/kernel/config"
-)
-
-// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
-// interface to kernel data structures.
-type FS string
-
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
-func NewFS(mountPoint string) (FS, error) {
- info, err := os.Stat(mountPoint)
- if err != nil {
- return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
- }
- if !info.IsDir() {
- return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
- }
-
- return FS(mountPoint), nil
-}
-
-// Path appends the given path elements to the filesystem path, adding separators
-// as necessary.
-func (fs FS) Path(p ...string) string {
- return filepath.Join(append([]string{string(fs)}, p...)...)
-}
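An illustrative use of the Path helper; note this is an internal package, importable only from within the procfs module itself, so the snippet below would not compile externally:

package main

import (
	"fmt"

	// Illustrative only: internal packages are importable solely within
	// github.com/prometheus/procfs itself.
	"github.com/prometheus/procfs/internal/fs"
)

func main() {
	proc, err := fs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	fmt.Println(proc.Path("net", "arp")) // "/proc/net/arp"
}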
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
deleted file mode 100644
index 14272dc788..0000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "os"
- "strconv"
- "strings"
-)
-
-// ParseUint32s parses a slice of strings into a slice of uint32s.
-func ParseUint32s(ss []string) ([]uint32, error) {
- us := make([]uint32, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- return nil, err
- }
-
- us = append(us, uint32(u))
- }
-
- return us, nil
-}
-
-// ParseUint64s parses a slice of strings into a slice of uint64s.
-func ParseUint64s(ss []string) ([]uint64, error) {
- us := make([]uint64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, u)
- }
-
- return us, nil
-}
-
-// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
-func ParsePInt64s(ss []string) ([]*int64, error) {
- us := make([]*int64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, &u)
- }
-
- return us, nil
-}
-
-// ParseHexUint64s parses a slice of hexadecimal strings into a slice of uint64 pointers.
-func ParseHexUint64s(ss []string) ([]*uint64, error) {
- us := make([]*uint64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 16, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, &u)
- }
-
- return us, nil
-}
-
-// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
-func ReadUintFromFile(path string) (uint64, error) {
- data, err := os.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
-}
-
-// ReadIntFromFile reads a file and attempts to parse an int64 from it.
-func ReadIntFromFile(path string) (int64, error) {
- data, err := os.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
-}
-
-// ParseBool parses a string into a boolean pointer.
-func ParseBool(b string) *bool {
- var truth bool
- switch b {
- case "enabled":
- truth = true
- case "disabled":
- truth = false
- default:
- return nil
- }
- return &truth
-}
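Illustrative use of the parsing helpers (again internal-only; shown for context, not externally importable). ParseBool's pointer result distinguishes "unknown" from false:

package main

import (
	"fmt"

	// Internal package: illustration only, not importable outside the module.
	"github.com/prometheus/procfs/internal/util"
)

func main() {
	us, err := util.ParseUint32s([]string{"1", "2", "3"})
	if err != nil {
		panic(err)
	}
	fmt.Println(us) // [1 2 3]

	if b := util.ParseBool("enabled"); b != nil {
		fmt.Println(*b) // true; a nil pointer means neither "enabled" nor "disabled"
	}
}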
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
deleted file mode 100644
index 71b7a70ebd..0000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "io"
- "os"
-)
-
-// ReadFileNoStat uses io.ReadAll to read contents of entire file.
-// This is similar to os.ReadFile but without the call to os.Stat, because
-// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads a maximum of 1024 KiB (1 MiB). For files larger than this, a scanner
-// should be used.
-func ReadFileNoStat(filename string) ([]byte, error) {
- const maxBufferSize = 1024 * 1024
-
- f, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- reader := io.LimitReader(f, maxBufferSize)
- return io.ReadAll(reader)
-}
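A minimal demonstration of why this helper exists, per the comment above: procfs files report misleading sizes via Stat, so the read simply ignores them (internal package; hypothetical caller):

package main

import (
	"fmt"

	"github.com/prometheus/procfs/internal/util" // internal; illustration only
)

func main() {
	// /proc/self/stat typically reports size 0 via Stat yet has content;
	// ReadFileNoStat disregards the reported size and just reads.
	b, err := util.ReadFileNoStat("/proc/self/stat")
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(b))
}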
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
deleted file mode 100644
index 1ab875ceec..0000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (linux || darwin) && !appengine
-// +build linux darwin
-// +build !appengine
-
-package util
-
-import (
- "bytes"
- "os"
- "syscall"
-)
-
-// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
-// https://github.com/prometheus/node_exporter/pull/728/files
-//
-// Note that this function will not read files larger than 128 bytes.
-func SysReadFile(file string) (string, error) {
- f, err := os.Open(file)
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- // On some machines, hwmon drivers are broken and return EAGAIN. This causes
- // Go's os.ReadFile implementation to poll forever.
- //
- // Since we either want to read data or bail immediately, do the simplest
- // possible read using syscall directly.
- const sysFileBufferSize = 128
- b := make([]byte, sysFileBufferSize)
- n, err := syscall.Read(int(f.Fd()), b)
- if err != nil {
- return "", err
- }
-
- return string(bytes.TrimSpace(b[:n])), nil
-}
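
The same direct-read trick can be reproduced outside the module. A Linux-only sketch, with an illustrative sysfs path:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"syscall"
)

// sysReadFile mirrors the removed util.SysReadFile: a single syscall.Read
// into a small fixed buffer, so a broken hwmon driver returning EAGAIN
// surfaces as an error instead of making os.ReadFile poll forever.
func sysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	b := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}
	return string(bytes.TrimSpace(b[:n])), nil
}

func main() {
	// Path is illustrative; any small sysfs attribute file works.
	s, err := sysReadFile("/sys/class/thermal/thermal_zone0/temp")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("raw value:", s)
}
```
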
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
deleted file mode 100644
index 1d86f5e63f..0000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (linux && appengine) || (!linux && !darwin)
-// +build linux,appengine !linux,!darwin
-
-package util
-
-import (
- "fmt"
-)
-
-// SysReadFile is here implemented as a no-op for builds that do not support
-// the read syscall, for example Windows, or Linux on Google App Engine.
-func SysReadFile(file string) (string, error) {
- return "", fmt.Errorf("not supported on this platform")
-}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
deleted file mode 100644
index fe2355d3c6..0000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "strconv"
-)
-
-// TODO(mdlayher): util packages are an anti-pattern and this should be moved
-// somewhere else that is more focused in the future.
-
-// A ValueParser enables parsing a single string into a variety of data types
-// in a concise and safe way. The Err method must be invoked after invoking
-// any other methods to ensure a value was successfully parsed.
-type ValueParser struct {
- v string
- err error
-}
-
-// NewValueParser creates a ValueParser using the input string.
-func NewValueParser(v string) *ValueParser {
- return &ValueParser{v: v}
-}
-
-// Int interprets the underlying value as an int and returns that value.
-func (vp *ValueParser) Int() int { return int(vp.int64()) }
-
-// PInt64 interprets the underlying value as an int64 and returns a pointer to
-// that value.
-func (vp *ValueParser) PInt64() *int64 {
- if vp.err != nil {
- return nil
- }
-
- v := vp.int64()
- return &v
-}
-
-// int64 interprets the underlying value as an int64 and returns that value.
-// TODO: export if/when necessary.
-func (vp *ValueParser) int64() int64 {
- if vp.err != nil {
- return 0
- }
-
- // A base value of zero makes ParseInt infer the correct base using the
- // string's prefix, if any.
- const base = 0
- v, err := strconv.ParseInt(vp.v, base, 64)
- if err != nil {
- vp.err = err
- return 0
- }
-
- return v
-}
-
-// PUInt64 interprets the underlying value as a uint64 and returns a pointer to
-// that value.
-func (vp *ValueParser) PUInt64() *uint64 {
- if vp.err != nil {
- return nil
- }
-
- // A base value of zero makes ParseUint infer the correct base using the
- // string's prefix, if any.
- const base = 0
- v, err := strconv.ParseUint(vp.v, base, 64)
- if err != nil {
- vp.err = err
- return nil
- }
-
- return &v
-}
-
-// Err returns the last error, if any, encountered by the ValueParser.
-func (vp *ValueParser) Err() error {
- return vp.err
-}
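
The distinctive part of ValueParser is the deferred error check: call the parse methods freely, then inspect Err once. A standalone sketch of that pattern (internal/util itself is not importable):

```go
package main

import (
	"fmt"
	"strconv"
)

// valueParser mirrors the removed util.ValueParser: parse methods record
// the first failure, and Err is checked once after all of them have run.
type valueParser struct {
	v   string
	err error
}

func (vp *valueParser) int64() int64 {
	if vp.err != nil {
		return 0
	}
	// Base 0 lets ParseInt infer the base from a "0x" or "0" prefix.
	v, err := strconv.ParseInt(vp.v, 0, 64)
	if err != nil {
		vp.err = err
		return 0
	}
	return v
}

func (vp *valueParser) Err() error { return vp.err }

func main() {
	vp := &valueParser{v: "0x10"}
	n := vp.int64()
	if err := vp.Err(); err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(n) // 16, parsed from the hex literal
}
```
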
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
deleted file mode 100644
index bc3a20c932..0000000000
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "net"
- "os"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
-type IPVSStats struct {
- // Total count of connections.
- Connections uint64
- // Total incoming packets processed.
- IncomingPackets uint64
- // Total outgoing packets processed.
- OutgoingPackets uint64
- // Total incoming traffic.
- IncomingBytes uint64
- // Total outgoing traffic.
- OutgoingBytes uint64
-}
-
-// IPVSBackendStatus holds current metrics of one virtual / real address pair.
-type IPVSBackendStatus struct {
- // The local (virtual) IP address.
- LocalAddress net.IP
- // The remote (real) IP address.
- RemoteAddress net.IP
- // The local (virtual) port.
- LocalPort uint16
- // The remote (real) port.
- RemotePort uint16
- // The local firewall mark.
- LocalMark string
- // The transport protocol (TCP, UDP).
- Proto string
- // The current number of active connections for this virtual/real address pair.
- ActiveConn uint64
- // The current number of inactive connections for this virtual/real address pair.
- InactConn uint64
- // The current weight of this virtual/real address pair.
- Weight uint64
-}
-
-// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) IPVSStats() (IPVSStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
- if err != nil {
- return IPVSStats{}, err
- }
-
- return parseIPVSStats(bytes.NewReader(data))
-}
-
-// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
-func parseIPVSStats(r io.Reader) (IPVSStats, error) {
- var (
- statContent []byte
- statLines []string
- statFields []string
- stats IPVSStats
- )
-
- statContent, err := io.ReadAll(r)
- if err != nil {
- return IPVSStats{}, err
- }
-
- statLines = strings.SplitN(string(statContent), "\n", 4)
- if len(statLines) != 4 {
- return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
- }
-
- statFields = strings.Fields(statLines[2])
- if len(statFields) != 5 {
- return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
- }
-
- stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
-
- return stats, nil
-}
-
-// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
- file, err := os.Open(fs.proc.Path("net/ip_vs"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- return parseIPVSBackendStatus(file)
-}
-
-func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
- var (
- status []IPVSBackendStatus
- scanner = bufio.NewScanner(file)
- proto string
- localMark string
- localAddress net.IP
- localPort uint16
- err error
- )
-
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- if len(fields) == 0 {
- continue
- }
- switch {
- case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
- continue
- case fields[0] == "TCP" || fields[0] == "UDP":
- if len(fields) < 2 {
- continue
- }
- proto = fields[0]
- localMark = ""
- localAddress, localPort, err = parseIPPort(fields[1])
- if err != nil {
- return nil, err
- }
- case fields[0] == "FWM":
- if len(fields) < 2 {
- continue
- }
- proto = fields[0]
- localMark = fields[1]
- localAddress = nil
- localPort = 0
- case fields[0] == "->":
- if len(fields) < 6 {
- continue
- }
- remoteAddress, remotePort, err := parseIPPort(fields[1])
- if err != nil {
- return nil, err
- }
- weight, err := strconv.ParseUint(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- activeConn, err := strconv.ParseUint(fields[4], 10, 64)
- if err != nil {
- return nil, err
- }
- inactConn, err := strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- status = append(status, IPVSBackendStatus{
- LocalAddress: localAddress,
- LocalPort: localPort,
- LocalMark: localMark,
- RemoteAddress: remoteAddress,
- RemotePort: remotePort,
- Proto: proto,
- Weight: weight,
- ActiveConn: activeConn,
- InactConn: inactConn,
- })
- }
- }
- return status, nil
-}
-
-func parseIPPort(s string) (net.IP, uint16, error) {
- var (
- ip net.IP
- err error
- )
-
- switch len(s) {
- case 13:
- ip, err = hex.DecodeString(s[0:8])
- if err != nil {
- return nil, 0, err
- }
- case 46:
- ip = net.ParseIP(s[1:40])
- if ip == nil {
- return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
- }
- default:
- return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
- }
-
- portString := s[len(s)-4:]
- if len(portString) != 4 {
- return nil, 0,
- fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
- }
- port, err := strconv.ParseUint(portString, 16, 16)
- if err != nil {
- return nil, 0, err
- }
-
- return ip, uint16(port), nil
-}
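
Unlike the util helpers, the IPVS accessors are exported. A minimal sketch of reading them through the package's public FS constructor, assuming a standard /proc mount and the ip_vs kernel module loaded:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	// /proc/net/ip_vs_stats only exists when the ip_vs module is loaded.
	stats, err := fs.IPVSStats()
	if err != nil {
		fmt.Println("no IPVS stats:", err)
		return
	}
	fmt.Printf("connections=%d in_pkts=%d out_pkts=%d\n",
		stats.Connections, stats.IncomingPackets, stats.OutgoingPackets)
}
```
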
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
deleted file mode 100644
index db88566bdf..0000000000
--- a/vendor/github.com/prometheus/procfs/kernel_random.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package procfs
-
-import (
- "os"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// KernelRandom contains information about the kernel's random number generator.
-type KernelRandom struct {
- // EntropyAvaliable gives the available entropy, in bits.
- EntropyAvaliable *uint64
- // PoolSize gives the size of the entropy pool, in bits.
- PoolSize *uint64
- // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
- URandomMinReseedSeconds *uint64
- // WriteWakeupThreshold the number of bits of entropy below which we wake up processes
- // that do a select(2) or poll(2) for write access to /dev/random.
- WriteWakeupThreshold *uint64
- // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
- // waiting for entropy from /dev/random.
- ReadWakeupThreshold *uint64
-}
-
-// KernelRandom returns values from /proc/sys/kernel/random.
-func (fs FS) KernelRandom() (KernelRandom, error) {
- random := KernelRandom{}
-
- for file, p := range map[string]**uint64{
- "entropy_avail": &random.EntropyAvaliable,
- "poolsize": &random.PoolSize,
- "urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
- "write_wakeup_threshold": &random.WriteWakeupThreshold,
- "read_wakeup_threshold": &random.ReadWakeupThreshold,
- } {
- val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
- if os.IsNotExist(err) {
- continue
- }
- if err != nil {
- return random, err
- }
- *p = &val
- }
-
- return random, nil
-}
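
A sketch of the corresponding public call; note that the struct uses pointer fields so a missing file under /proc/sys/kernel/random reads as nil rather than zero:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	kr, err := fs.KernelRandom()
	if err != nil {
		panic(err)
	}

	// Nil means the file was absent on this kernel, not a zero reading.
	if kr.EntropyAvaliable != nil { // field name keeps the upstream spelling
		fmt.Println("entropy available (bits):", *kr.EntropyAvaliable)
	}
	if kr.PoolSize != nil {
		fmt.Println("pool size (bits):", *kr.PoolSize)
	}
}
```
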
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
deleted file mode 100644
index 332e76c17f..0000000000
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// LoadAvg represents an entry in /proc/loadavg.
-type LoadAvg struct {
- Load1 float64
- Load5 float64
- Load15 float64
-}
-
-// LoadAvg returns loadavg from /proc.
-func (fs FS) LoadAvg() (*LoadAvg, error) {
- path := fs.proc.Path("loadavg")
-
- data, err := util.ReadFileNoStat(path)
- if err != nil {
- return nil, err
- }
- return parseLoad(data)
-}
-
-// parseLoad parses /proc/loadavg and returns the 1m, 5m and 15m load averages.
-func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
- loads := make([]float64, 3)
- parts := strings.Fields(string(loadavgBytes))
- if len(parts) < 3 {
- return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
- }
-
- var err error
- for i, load := range parts[0:3] {
- loads[i], err = strconv.ParseFloat(load, 64)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
- }
- }
- return &LoadAvg{
- Load1: loads[0],
- Load5: loads[1],
- Load15: loads[2],
- }, nil
-}
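
LoadAvg is one of the simplest accessors; a minimal usage sketch, assuming /proc is mounted normally:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	la, err := fs.LoadAvg()
	if err != nil {
		panic(err)
	}
	fmt.Printf("load1=%.2f load5=%.2f load15=%.2f\n",
		la.Load1, la.Load5, la.Load15)
}
```
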
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
deleted file mode 100644
index dd2b898814..0000000000
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "os"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
- recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
- recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
- recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
- recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
- componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
-)
-
-// MDStat holds info parsed from /proc/mdstat.
-type MDStat struct {
- // Name of the device.
- Name string
- // activity-state of the device.
- ActivityState string
- // Number of active disks.
- DisksActive int64
- // Total number of disks the device requires.
- DisksTotal int64
- // Number of failed disks.
- DisksFailed int64
- // Number of "down" disks. (the _ indicator in the status line)
- DisksDown int64
- // Spare disks in the device.
- DisksSpare int64
- // Number of blocks the device holds.
- BlocksTotal int64
- // Number of blocks on the device that are in sync.
- BlocksSynced int64
- // progress percentage of current sync
- BlocksSyncedPct float64
- // estimated finishing time for current sync (in minutes)
- BlocksSyncedFinishTime float64
- // current sync speed (in Kilobytes/sec)
- BlocksSyncedSpeed float64
- // Names of the md component devices.
- Devices []string
-}
-
-// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://raid.wiki.kernel.org/index.php/Mdstat
-func (fs FS) MDStat() ([]MDStat, error) {
- data, err := os.ReadFile(fs.proc.Path("mdstat"))
- if err != nil {
- return nil, err
- }
- mdstat, err := parseMDStat(data)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
- }
- return mdstat, nil
-}
-
-// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
-// structs containing the relevant info.
-func parseMDStat(mdStatData []byte) ([]MDStat, error) {
- mdStats := []MDStat{}
- lines := strings.Split(string(mdStatData), "\n")
-
- for i, line := range lines {
- if strings.TrimSpace(line) == "" || line[0] == ' ' ||
- strings.HasPrefix(line, "Personalities") ||
- strings.HasPrefix(line, "unused") {
- continue
- }
-
- deviceFields := strings.Fields(line)
- if len(deviceFields) < 3 {
- return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
- }
- mdName := deviceFields[0] // mdx
- state := deviceFields[2] // active or inactive
-
- if len(lines) <= i+3 {
- return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
- }
-
- // Failed disks have the suffix (F) & Spare disks have the suffix (S).
- fail := int64(strings.Count(line, "(F)"))
- spare := int64(strings.Count(line, "(S)"))
- active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
-
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
- }
-
- syncLineIdx := i + 2
- if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
- syncLineIdx++
- }
-
- // If device is syncing at the moment, get the number of currently
- // synced bytes, otherwise that number equals the size of the device.
- syncedBlocks := size
- speed := float64(0)
- finish := float64(0)
- pct := float64(0)
- recovering := strings.Contains(lines[syncLineIdx], "recovery")
- resyncing := strings.Contains(lines[syncLineIdx], "resync")
- checking := strings.Contains(lines[syncLineIdx], "check")
-
- // Append recovery and resyncing state info.
- if recovering || resyncing || checking {
- if recovering {
- state = "recovering"
- } else if checking {
- state = "checking"
- } else {
- state = "resyncing"
- }
-
- // Handle case when resync=PENDING or resync=DELAYED.
- if strings.Contains(lines[syncLineIdx], "PENDING") ||
- strings.Contains(lines[syncLineIdx], "DELAYED") {
- syncedBlocks = 0
- } else {
- syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
- }
- }
- }
-
- mdStats = append(mdStats, MDStat{
- Name: mdName,
- ActivityState: state,
- DisksActive: active,
- DisksFailed: fail,
- DisksDown: down,
- DisksSpare: spare,
- DisksTotal: total,
- BlocksTotal: size,
- BlocksSynced: syncedBlocks,
- BlocksSyncedPct: pct,
- BlocksSyncedFinishTime: finish,
- BlocksSyncedSpeed: speed,
- Devices: evalComponentDevices(deviceFields),
- })
- }
-
- return mdStats, nil
-}
-
-func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
- statusFields := strings.Fields(statusLine)
- if len(statusFields) < 1 {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
- }
-
- sizeStr := statusFields[0]
- size, err = strconv.ParseInt(sizeStr, 10, 64)
- if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
- }
-
- if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
- // In the device deviceLine, only disks have a number associated with them in [].
- total = int64(strings.Count(deviceLine, "["))
- return total, total, 0, size, nil
- }
-
- if strings.Contains(deviceLine, "inactive") {
- return 0, 0, 0, size, nil
- }
-
- matches := statusLineRE.FindStringSubmatch(statusLine)
- if len(matches) != 5 {
- return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
- }
-
- total, err = strconv.ParseInt(matches[2], 10, 64)
- if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
- }
-
- active, err = strconv.ParseInt(matches[3], 10, 64)
- if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
- }
- down = int64(strings.Count(matches[4], "_"))
-
- return active, total, down, size, nil
-}
-
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
- matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
- }
-
- syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
- }
-
- // Get percentage complete
- matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
- }
- pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
- if err != nil {
- return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
- }
-
- // Get time expected left to complete
- matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
- }
- finish, err = strconv.ParseFloat(matches[1], 64)
- if err != nil {
- return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
- }
-
- // Get recovery speed
- matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
- if len(matches) != 2 {
- return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
- }
- speed, err = strconv.ParseFloat(matches[1], 64)
- if err != nil {
- return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
- }
-
- return syncedBlocks, pct, finish, speed, nil
-}
-
-func evalComponentDevices(deviceFields []string) []string {
- mdComponentDevices := make([]string, 0)
- if len(deviceFields) > 3 {
- for _, field := range deviceFields[4:] {
- match := componentDeviceRE.FindStringSubmatch(field)
- if match == nil {
- continue
- }
- mdComponentDevices = append(mdComponentDevices, match[1])
- }
- }
-
- return mdComponentDevices
-}
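
A sketch of consuming the parsed mdstat data; on hosts without software RAID the call errors or returns an empty slice, depending on whether /proc/mdstat exists:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	mds, err := fs.MDStat()
	if err != nil {
		fmt.Println("no mdstat:", err)
		return
	}
	for _, md := range mds {
		fmt.Printf("%s: state=%s disks=%d/%d synced=%d/%d blocks\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}
```
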
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
deleted file mode 100644
index 4b2c4050a3..0000000000
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Meminfo represents memory statistics.
-type Meminfo struct {
- // Total usable ram (i.e. physical ram minus a few reserved
- // bits and the kernel binary code)
- MemTotal *uint64
- // The sum of LowFree+HighFree
- MemFree *uint64
- // An estimate of how much memory is available for starting
- // new applications, without swapping. Calculated from
- // MemFree, SReclaimable, the size of the file LRU lists, and
- // the low watermarks in each zone. The estimate takes into
- // account that the system needs some page cache to function
- // well, and that not all reclaimable slab will be
- // reclaimable, due to items being in use. The impact of those
- // factors will vary from system to system.
- MemAvailable *uint64
- // Relatively temporary storage for raw disk blocks shouldn't
- // get tremendously large (20MB or so)
- Buffers *uint64
- Cached *uint64
- // Memory that once was swapped out, is swapped back in but
- // still also is in the swapfile (if memory is needed it
- // doesn't need to be swapped out AGAIN because it is already
- // in the swapfile. This saves I/O)
- SwapCached *uint64
- // Memory that has been used more recently and usually not
- // reclaimed unless absolutely necessary.
- Active *uint64
- // Memory which has been less recently used. It is more
- // eligible to be reclaimed for other purposes
- Inactive *uint64
- ActiveAnon *uint64
- InactiveAnon *uint64
- ActiveFile *uint64
- InactiveFile *uint64
- Unevictable *uint64
- Mlocked *uint64
- // total amount of swap space available
- SwapTotal *uint64
- // Memory which has been evicted from RAM, and is temporarily
- // on the disk
- SwapFree *uint64
- // Memory which is waiting to get written back to the disk
- Dirty *uint64
- // Memory which is actively being written back to the disk
- Writeback *uint64
- // Non-file backed pages mapped into userspace page tables
- AnonPages *uint64
- // files which have been mapped, such as libraries
- Mapped *uint64
- Shmem *uint64
- // in-kernel data structures cache
- Slab *uint64
- // Part of Slab, that might be reclaimed, such as caches
- SReclaimable *uint64
- // Part of Slab, that cannot be reclaimed on memory pressure
- SUnreclaim *uint64
- KernelStack *uint64
- // amount of memory dedicated to the lowest level of page
- // tables.
- PageTables *uint64
- // NFS pages sent to the server, but not yet committed to
- // stable storage
- NFSUnstable *uint64
- // Memory used for block device "bounce buffers"
- Bounce *uint64
- // Memory used by FUSE for temporary writeback buffers
- WritebackTmp *uint64
- // Based on the overcommit ratio ('vm.overcommit_ratio'),
- // this is the total amount of memory currently available to
- // be allocated on the system. This limit is only adhered to
- // if strict overcommit accounting is enabled (mode 2 in
- // 'vm.overcommit_memory').
- // The CommitLimit is calculated with the following formula:
- // CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
- // overcommit_ratio / 100 + [total swap pages]
- // For example, on a system with 1G of physical RAM and 7G
- // of swap with a `vm.overcommit_ratio` of 30 it would
- // yield a CommitLimit of 7.3G.
- // For more details, see the memory overcommit documentation
- // in vm/overcommit-accounting.
- CommitLimit *uint64
- // The amount of memory presently allocated on the system.
- // The committed memory is a sum of all of the memory which
- // has been allocated by processes, even if it has not been
- // "used" by them as of yet. A process which malloc()'s 1G
- // of memory, but only touches 300M of it will show up as
- // using 1G. This 1G is memory which has been "committed" to
- // by the VM and can be used at any time by the allocating
- // application. With strict overcommit enabled on the system
- // (mode 2 in 'vm.overcommit_memory'),allocations which would
- // exceed the CommitLimit (detailed above) will not be permitted.
- // This is useful if one needs to guarantee that processes will
- // not fail due to lack of memory once that memory has been
- // successfully allocated.
- CommittedAS *uint64
- // total size of vmalloc memory area
- VmallocTotal *uint64
- // amount of vmalloc area which is used
- VmallocUsed *uint64
- // largest contiguous block of vmalloc area which is free
- VmallocChunk *uint64
- Percpu *uint64
- HardwareCorrupted *uint64
- AnonHugePages *uint64
- ShmemHugePages *uint64
- ShmemPmdMapped *uint64
- CmaTotal *uint64
- CmaFree *uint64
- HugePagesTotal *uint64
- HugePagesFree *uint64
- HugePagesRsvd *uint64
- HugePagesSurp *uint64
- Hugepagesize *uint64
- DirectMap4k *uint64
- DirectMap2M *uint64
- DirectMap1G *uint64
-
- // The struct fields below are the byte-normalized counterparts to the
- // existing struct fields. Values are normalized using the optional
- // unit field in the meminfo line.
- MemTotalBytes *uint64
- MemFreeBytes *uint64
- MemAvailableBytes *uint64
- BuffersBytes *uint64
- CachedBytes *uint64
- SwapCachedBytes *uint64
- ActiveBytes *uint64
- InactiveBytes *uint64
- ActiveAnonBytes *uint64
- InactiveAnonBytes *uint64
- ActiveFileBytes *uint64
- InactiveFileBytes *uint64
- UnevictableBytes *uint64
- MlockedBytes *uint64
- SwapTotalBytes *uint64
- SwapFreeBytes *uint64
- DirtyBytes *uint64
- WritebackBytes *uint64
- AnonPagesBytes *uint64
- MappedBytes *uint64
- ShmemBytes *uint64
- SlabBytes *uint64
- SReclaimableBytes *uint64
- SUnreclaimBytes *uint64
- KernelStackBytes *uint64
- PageTablesBytes *uint64
- NFSUnstableBytes *uint64
- BounceBytes *uint64
- WritebackTmpBytes *uint64
- CommitLimitBytes *uint64
- CommittedASBytes *uint64
- VmallocTotalBytes *uint64
- VmallocUsedBytes *uint64
- VmallocChunkBytes *uint64
- PercpuBytes *uint64
- HardwareCorruptedBytes *uint64
- AnonHugePagesBytes *uint64
- ShmemHugePagesBytes *uint64
- ShmemPmdMappedBytes *uint64
- CmaTotalBytes *uint64
- CmaFreeBytes *uint64
- HugepagesizeBytes *uint64
- DirectMap4kBytes *uint64
- DirectMap2MBytes *uint64
- DirectMap1GBytes *uint64
-}
-
-// Meminfo returns information about current kernel/system memory statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) Meminfo() (Meminfo, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
- if err != nil {
- return Meminfo{}, err
- }
-
- m, err := parseMemInfo(bytes.NewReader(b))
- if err != nil {
- return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
- }
-
- return *m, nil
-}
-
-func parseMemInfo(r io.Reader) (*Meminfo, error) {
- var m Meminfo
- s := bufio.NewScanner(r)
- for s.Scan() {
- fields := strings.Fields(s.Text())
- var val, valBytes uint64
-
- val, err := strconv.ParseUint(fields[1], 0, 64)
- if err != nil {
- return nil, err
- }
-
- switch len(fields) {
- case 2:
- // No unit present, use the parsed value as bytes directly.
- valBytes = val
- case 3:
- // Unit present in optional 3rd field, convert it to
- // bytes. The only unit supported within the Linux
- // kernel is `kB`.
- if fields[2] != "kB" {
- return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
- }
-
- valBytes = 1024 * val
-
- default:
- return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
- }
-
- switch fields[0] {
- case "MemTotal:":
- m.MemTotal = &val
- m.MemTotalBytes = &valBytes
- case "MemFree:":
- m.MemFree = &val
- m.MemFreeBytes = &valBytes
- case "MemAvailable:":
- m.MemAvailable = &val
- m.MemAvailableBytes = &valBytes
- case "Buffers:":
- m.Buffers = &val
- m.BuffersBytes = &valBytes
- case "Cached:":
- m.Cached = &val
- m.CachedBytes = &valBytes
- case "SwapCached:":
- m.SwapCached = &val
- m.SwapCachedBytes = &valBytes
- case "Active:":
- m.Active = &val
- m.ActiveBytes = &valBytes
- case "Inactive:":
- m.Inactive = &val
- m.InactiveBytes = &valBytes
- case "Active(anon):":
- m.ActiveAnon = &val
- m.ActiveAnonBytes = &valBytes
- case "Inactive(anon):":
- m.InactiveAnon = &val
- m.InactiveAnonBytes = &valBytes
- case "Active(file):":
- m.ActiveFile = &val
- m.ActiveFileBytes = &valBytes
- case "Inactive(file):":
- m.InactiveFile = &val
- m.InactiveFileBytes = &valBytes
- case "Unevictable:":
- m.Unevictable = &val
- m.UnevictableBytes = &valBytes
- case "Mlocked:":
- m.Mlocked = &val
- m.MlockedBytes = &valBytes
- case "SwapTotal:":
- m.SwapTotal = &val
- m.SwapTotalBytes = &valBytes
- case "SwapFree:":
- m.SwapFree = &val
- m.SwapFreeBytes = &valBytes
- case "Dirty:":
- m.Dirty = &val
- m.DirtyBytes = &valBytes
- case "Writeback:":
- m.Writeback = &val
- m.WritebackBytes = &valBytes
- case "AnonPages:":
- m.AnonPages = &val
- m.AnonPagesBytes = &valBytes
- case "Mapped:":
- m.Mapped = &val
- m.MappedBytes = &valBytes
- case "Shmem:":
- m.Shmem = &val
- m.ShmemBytes = &valBytes
- case "Slab:":
- m.Slab = &val
- m.SlabBytes = &valBytes
- case "SReclaimable:":
- m.SReclaimable = &val
- m.SReclaimableBytes = &valBytes
- case "SUnreclaim:":
- m.SUnreclaim = &val
- m.SUnreclaimBytes = &valBytes
- case "KernelStack:":
- m.KernelStack = &val
- m.KernelStackBytes = &valBytes
- case "PageTables:":
- m.PageTables = &val
- m.PageTablesBytes = &valBytes
- case "NFS_Unstable:":
- m.NFSUnstable = &val
- m.NFSUnstableBytes = &valBytes
- case "Bounce:":
- m.Bounce = &val
- m.BounceBytes = &valBytes
- case "WritebackTmp:":
- m.WritebackTmp = &val
- m.WritebackTmpBytes = &valBytes
- case "CommitLimit:":
- m.CommitLimit = &val
- m.CommitLimitBytes = &valBytes
- case "Committed_AS:":
- m.CommittedAS = &val
- m.CommittedASBytes = &valBytes
- case "VmallocTotal:":
- m.VmallocTotal = &val
- m.VmallocTotalBytes = &valBytes
- case "VmallocUsed:":
- m.VmallocUsed = &val
- m.VmallocUsedBytes = &valBytes
- case "VmallocChunk:":
- m.VmallocChunk = &val
- m.VmallocChunkBytes = &valBytes
- case "Percpu:":
- m.Percpu = &val
- m.PercpuBytes = &valBytes
- case "HardwareCorrupted:":
- m.HardwareCorrupted = &val
- m.HardwareCorruptedBytes = &valBytes
- case "AnonHugePages:":
- m.AnonHugePages = &val
- m.AnonHugePagesBytes = &valBytes
- case "ShmemHugePages:":
- m.ShmemHugePages = &val
- m.ShmemHugePagesBytes = &valBytes
- case "ShmemPmdMapped:":
- m.ShmemPmdMapped = &val
- m.ShmemPmdMappedBytes = &valBytes
- case "CmaTotal:":
- m.CmaTotal = &val
- m.CmaTotalBytes = &valBytes
- case "CmaFree:":
- m.CmaFree = &val
- m.CmaFreeBytes = &valBytes
- case "HugePages_Total:":
- m.HugePagesTotal = &val
- case "HugePages_Free:":
- m.HugePagesFree = &val
- case "HugePages_Rsvd:":
- m.HugePagesRsvd = &val
- case "HugePages_Surp:":
- m.HugePagesSurp = &val
- case "Hugepagesize:":
- m.Hugepagesize = &val
- m.HugepagesizeBytes = &valBytes
- case "DirectMap4k:":
- m.DirectMap4k = &val
- m.DirectMap4kBytes = &valBytes
- case "DirectMap2M:":
- m.DirectMap2M = &val
- m.DirectMap2MBytes = &valBytes
- case "DirectMap1G:":
- m.DirectMap1G = &val
- m.DirectMap1GBytes = &valBytes
- }
- }
-
- return &m, nil
-}
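
A sketch showing the two parallel field families: the raw values and their byte-normalized *Bytes counterparts derived from the optional kB unit column:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	mi, err := fs.Meminfo()
	if err != nil {
		panic(err)
	}

	// Pointer fields: nil means the line was absent from this kernel's
	// meminfo, so always check before dereferencing.
	if mi.MemTotal != nil && mi.MemTotalBytes != nil {
		fmt.Printf("MemTotal: raw=%d, normalized=%d bytes\n",
			*mi.MemTotal, *mi.MemTotalBytes)
	}
	if mi.MemAvailableBytes != nil {
		fmt.Printf("MemAvailable: %d bytes\n", *mi.MemAvailableBytes)
	}
}
```
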
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
deleted file mode 100644
index a704c5e735..0000000000
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A MountInfo is a type that describes the details and options
-// for each mount, parsed from /proc/self/mountinfo.
-// The fields of each entry of /proc/self/mountinfo
-// are described in the following man page:
-// http://man7.org/linux/man-pages/man5/proc.5.html
-type MountInfo struct {
- // Unique ID for the mount
- MountID int
- // The ID of the parent mount
- ParentID int
- // The value of `st_dev` for the files on this FS
- MajorMinorVer string
- // The pathname of the directory in the FS that forms
- // the root for this mount
- Root string
- // The pathname of the mount point relative to the root
- MountPoint string
- // Mount options
- Options map[string]string
- // Zero or more optional fields
- OptionalFields map[string]string
- // The Filesystem type
- FSType string
- // FS specific information or "none"
- Source string
- // Superblock options
- SuperOptions map[string]string
-}
-
-// parseMountInfo reads each line of the mountinfo file and returns a list of parsed MountInfo structs.
-func parseMountInfo(info []byte) ([]*MountInfo, error) {
- mounts := []*MountInfo{}
- scanner := bufio.NewScanner(bytes.NewReader(info))
- for scanner.Scan() {
- mountString := scanner.Text()
- parsedMounts, err := parseMountInfoString(mountString)
- if err != nil {
- return nil, err
- }
- mounts = append(mounts, parsedMounts)
- }
-
- err := scanner.Err()
- return mounts, err
-}
-
-// parseMountInfoString parses a mountinfo line and converts it to a MountInfo struct.
-// An important check here is whether the hyphen separator exists; if it
-// does not, the line is malformed.
-func parseMountInfoString(mountString string) (*MountInfo, error) {
- var err error
-
- mountInfo := strings.Split(mountString, " ")
- mountInfoLength := len(mountInfo)
- if mountInfoLength < 10 {
- return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
- }
-
- if mountInfo[mountInfoLength-4] != "-" {
- return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
- }
-
- mount := &MountInfo{
- MajorMinorVer: mountInfo[2],
- Root: mountInfo[3],
- MountPoint: mountInfo[4],
- Options: mountOptionsParser(mountInfo[5]),
- OptionalFields: nil,
- FSType: mountInfo[mountInfoLength-3],
- Source: mountInfo[mountInfoLength-2],
- SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
- }
-
- mount.MountID, err = strconv.Atoi(mountInfo[0])
- if err != nil {
- return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
- }
- mount.ParentID, err = strconv.Atoi(mountInfo[1])
- if err != nil {
- return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
- }
- // Has optional fields, which is a space separated list of values.
- // Example: shared:2 master:7
- if mountInfo[6] != "" {
- mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
- if err != nil {
- return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
- }
- }
- return mount, nil
-}
-
-// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
-func mountOptionsIsValidField(s string) bool {
- switch s {
- case
- "shared",
- "master",
- "propagate_from",
- "unbindable":
- return true
- }
- return false
-}
-
-// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings.
-func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
- optionalFields := make(map[string]string)
- for _, field := range o {
- optionSplit := strings.SplitN(field, ":", 2)
- value := ""
- if len(optionSplit) == 2 {
- value = optionSplit[1]
- }
- if mountOptionsIsValidField(optionSplit[0]) {
- optionalFields[optionSplit[0]] = value
- }
- }
- return optionalFields, nil
-}
-
-// mountOptionsParser parses the mount options, superblock options.
-func mountOptionsParser(mountOptions string) map[string]string {
- opts := make(map[string]string)
- options := strings.Split(mountOptions, ",")
- for _, opt := range options {
- splitOption := strings.Split(opt, "=")
- if len(splitOption) < 2 {
- key := splitOption[0]
- opts[key] = ""
- } else {
- key, value := splitOption[0], splitOption[1]
- opts[key] = value
- }
- }
- return opts
-}
-
-// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
-func GetMounts() ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat("/proc/self/mountinfo")
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
-
-// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
-func GetProcMounts(pid int) ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
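
GetMounts is exported at package level, so a caller never touches the parser directly. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// Reads and parses /proc/self/mountinfo for the calling process.
	mounts, err := procfs.GetMounts()
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		fmt.Printf("%-25s %-10s root=%s super=%v\n",
			m.MountPoint, m.FSType, m.Root, m.SuperOptions)
	}
}
```
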
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
deleted file mode 100644
index 2f54e77c75..0000000000
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ /dev/null
@@ -1,707 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-// While implementing parsing of /proc/[pid]/mountstats, this blog was used
-// heavily as a reference:
-// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
-//
-// Special thanks to Chris Siebenmann for all of his posts explaining the
-// various statistics available for NFS.
-
-import (
- "bufio"
- "fmt"
- "io"
- "strconv"
- "strings"
- "time"
-)
-
-// Constants shared between multiple functions.
-const (
- deviceEntryLen = 8
-
- fieldBytesLen = 8
- fieldEventsLen = 27
-
- statVersion10 = "1.0"
- statVersion11 = "1.1"
-
- fieldTransport10TCPLen = 10
- fieldTransport10UDPLen = 7
-
- fieldTransport11TCPLen = 13
- fieldTransport11UDPLen = 10
-
- // kernel version >= 4.14 MaxLen
- // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
- fieldTransport11RDMAMaxLen = 28
-
- // kernel version <= 4.2 MinLen
- // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
- fieldTransport11RDMAMinLen = 20
-)
-
-// A Mount is a device mount parsed from /proc/[pid]/mountstats.
-type Mount struct {
- // Name of the device.
- Device string
- // The mount point of the device.
- Mount string
- // The filesystem type used by the device.
- Type string
- // If available, additional statistics related to this Mount.
- // Use a type assertion to determine if additional statistics are available.
- Stats MountStats
-}
-
-// A MountStats is a type which contains detailed statistics for a specific
-// type of Mount.
-type MountStats interface {
- mountStats()
-}
-
-// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
-type MountStatsNFS struct {
- // The version of statistics provided.
- StatVersion string
- // The mount options of the NFS mount.
- Opts map[string]string
- // The age of the NFS mount.
- Age time.Duration
- // Statistics related to byte counters for various operations.
- Bytes NFSBytesStats
- // Statistics related to various NFS event occurrences.
- Events NFSEventsStats
- // Statistics broken down by filesystem operation.
- Operations []NFSOperationStats
- // Statistics about the NFS RPC transport.
- Transport NFSTransportStats
-}
-
-// mountStats implements MountStats.
-func (m MountStatsNFS) mountStats() {}
-
-// A NFSBytesStats contains statistics about the number of bytes read and written
-// by an NFS client to and from an NFS server.
-type NFSBytesStats struct {
- // Number of bytes read using the read() syscall.
- Read uint64
- // Number of bytes written using the write() syscall.
- Write uint64
- // Number of bytes read using the read() syscall in O_DIRECT mode.
- DirectRead uint64
- // Number of bytes written using the write() syscall in O_DIRECT mode.
- DirectWrite uint64
- // Number of bytes read from the NFS server, in total.
- ReadTotal uint64
- // Number of bytes written to the NFS server, in total.
- WriteTotal uint64
- // Number of pages read directly via mmap()'d files.
- ReadPages uint64
- // Number of pages written directly via mmap()'d files.
- WritePages uint64
-}
-
-// A NFSEventsStats contains statistics about NFS event occurrences.
-type NFSEventsStats struct {
- // Number of times cached inode attributes are re-validated from the server.
- InodeRevalidate uint64
- // Number of times cached dentry nodes are re-validated from the server.
- DnodeRevalidate uint64
- // Number of times an inode cache is cleared.
- DataInvalidate uint64
- // Number of times cached inode attributes are invalidated.
- AttributeInvalidate uint64
- // Number of times files or directories have been open()'d.
- VFSOpen uint64
- // Number of times a directory lookup has occurred.
- VFSLookup uint64
- // Number of times permissions have been checked.
- VFSAccess uint64
- // Number of updates (and potential writes) to pages.
- VFSUpdatePage uint64
- // Number of pages read directly via mmap()'d files.
- VFSReadPage uint64
- // Number of times a group of pages have been read.
- VFSReadPages uint64
- // Number of pages written directly via mmap()'d files.
- VFSWritePage uint64
- // Number of times a group of pages have been written.
- VFSWritePages uint64
- // Number of times directory entries have been read with getdents().
- VFSGetdents uint64
- // Number of times attributes have been set on inodes.
- VFSSetattr uint64
- // Number of pending writes that have been forcefully flushed to the server.
- VFSFlush uint64
- // Number of times fsync() has been called on directories and files.
- VFSFsync uint64
- // Number of times locking has been attempted on a file.
- VFSLock uint64
- // Number of times files have been closed and released.
- VFSFileRelease uint64
- // Unknown. Possibly unused.
- CongestionWait uint64
- // Number of times files have been truncated.
- Truncation uint64
- // Number of times a file has been grown due to writes beyond its existing end.
- WriteExtension uint64
- // Number of times a file was removed while still open by another process.
- SillyRename uint64
- // Number of times the NFS server gave less data than expected while reading.
- ShortRead uint64
- // Number of times the NFS server wrote less data than expected while writing.
- ShortWrite uint64
- // Number of times the NFS server indicated EJUKEBOX; retrieving data from
- // offline storage.
- JukeboxDelay uint64
- // Number of NFS v4.1+ pNFS reads.
- PNFSRead uint64
- // Number of NFS v4.1+ pNFS writes.
- PNFSWrite uint64
-}
-
-// A NFSOperationStats contains statistics for a single operation.
-type NFSOperationStats struct {
- // The name of the operation.
- Operation string
- // Number of requests performed for this operation.
- Requests uint64
- // Number of times an actual RPC request has been transmitted for this operation.
- Transmissions uint64
- // Number of times a request has had a major timeout.
- MajorTimeouts uint64
- // Number of bytes sent for this operation, including RPC headers and payload.
- BytesSent uint64
- // Number of bytes received for this operation, including RPC headers and payload.
- BytesReceived uint64
- // Duration all requests spent queued for transmission before they were sent.
- CumulativeQueueMilliseconds uint64
- // Duration it took to get a reply back after the request was transmitted.
- CumulativeTotalResponseMilliseconds uint64
- // Duration from when a request was enqueued to when it was completely handled.
- CumulativeTotalRequestMilliseconds uint64
- // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
- Errors uint64
-}
-
-// A NFSTransportStats contains statistics for the NFS mount RPC requests and
-// responses.
-type NFSTransportStats struct {
- // The transport protocol used for the NFS mount.
- Protocol string
- // The local port used for the NFS mount.
- Port uint64
- // Number of times the client has had to establish a connection from scratch
- // to the NFS server.
- Bind uint64
- // Number of times the client has made a TCP connection to the NFS server.
- Connect uint64
- // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
- // spent waiting for connections to the server to be established.
- ConnectIdleTime uint64
- // Duration since the NFS mount last saw any RPC traffic.
- IdleTimeSeconds uint64
- // Number of RPC requests for this mount sent to the NFS server.
- Sends uint64
- // Number of RPC responses for this mount received from the NFS server.
- Receives uint64
- // Number of times the NFS server sent a response with a transaction ID
- // unknown to this client.
- BadTransactionIDs uint64
- // A running counter, incremented on each request as the current difference
- // between sends and receives.
- CumulativeActiveRequests uint64
- // A running counter, incremented on each request by the current backlog
- // queue size.
- CumulativeBacklog uint64
-
- // Stats below only available with stat version 1.1.
-
- // Maximum number of simultaneously active RPC requests ever used.
- MaximumRPCSlotsUsed uint64
- // A running counter, incremented on each request as the current size of the
- // sending queue.
- CumulativeSendingQueue uint64
- // A running counter, incremented on each request as the current size of the
- // pending queue.
- CumulativePendingQueue uint64
-
- // Stats below only available with stat version 1.1.
- // Transport over RDMA
-
- // accessed when sending a call
- ReadChunkCount uint64
- WriteChunkCount uint64
- ReplyChunkCount uint64
- TotalRdmaRequest uint64
-
- // rarely accessed error counters
- PullupCopyCount uint64
- HardwayRegisterCount uint64
- FailedMarshalCount uint64
- BadReplyCount uint64
- MrsRecovered uint64
- MrsOrphaned uint64
- MrsAllocated uint64
- EmptySendctxQ uint64
-
- // accessed when receiving a reply
- TotalRdmaReply uint64
- FixupCopyCount uint64
- ReplyWaitsForSend uint64
- LocalInvNeeded uint64
- NomsgCallCount uint64
- BcallCount uint64
-}
-
-// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
-// of Mount structures containing detailed information about each mount.
-// If available, statistics for each mount are parsed as well.
-func parseMountStats(r io.Reader) ([]*Mount, error) {
- const (
- device = "device"
- statVersionPrefix = "statvers="
-
- nfs3Type = "nfs"
- nfs4Type = "nfs4"
- )
-
- var mounts []*Mount
-
- s := bufio.NewScanner(r)
- for s.Scan() {
- // Only look for device entries in this function
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 || ss[0] != device {
- continue
- }
-
- m, err := parseMount(ss)
- if err != nil {
- return nil, err
- }
-
- // Does this mount also possess statistics information?
- if len(ss) > deviceEntryLen {
- // Only NFSv3 and v4 are supported for parsing statistics
- if m.Type != nfs3Type && m.Type != nfs4Type {
- return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
- }
-
- statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
-
- stats, err := parseMountStatsNFS(s, statVersion)
- if err != nil {
- return nil, err
- }
-
- m.Stats = stats
- }
-
- mounts = append(mounts, m)
- }
-
- return mounts, s.Err()
-}
-
-// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-//
-// device [device] mounted on [mount] with fstype [type]
-func parseMount(ss []string) (*Mount, error) {
- if len(ss) < deviceEntryLen {
- return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
- }
-
- // Check for specific words appearing at specific indices to ensure
- // the format is consistent with what we expect
- format := []struct {
- i int
- s string
- }{
- {i: 0, s: "device"},
- {i: 2, s: "mounted"},
- {i: 3, s: "on"},
- {i: 5, s: "with"},
- {i: 6, s: "fstype"},
- }
-
- for _, f := range format {
- if ss[f.i] != f.s {
- return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
- }
- }
-
- return &Mount{
- Device: ss[1],
- Mount: ss[4],
- Type: ss[7],
- }, nil
-}
-
-// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
-// related to NFS statistics.
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
- // Field indicators for parsing specific types of data
- const (
- fieldOpts = "opts:"
- fieldAge = "age:"
- fieldBytes = "bytes:"
- fieldEvents = "events:"
- fieldPerOpStats = "per-op"
- fieldTransport = "xprt:"
- )
-
- stats := &MountStatsNFS{
- StatVersion: statVersion,
- }
-
- for s.Scan() {
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 {
- break
- }
-
- switch ss[0] {
- case fieldOpts:
- if len(ss) < 2 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
- }
- if stats.Opts == nil {
- stats.Opts = map[string]string{}
- }
- for _, opt := range strings.Split(ss[1], ",") {
- split := strings.Split(opt, "=")
- if len(split) == 2 {
- stats.Opts[split[0]] = split[1]
- } else {
- stats.Opts[opt] = ""
- }
- }
- case fieldAge:
- if len(ss) < 2 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
- }
- // Age integer is in seconds
- d, err := time.ParseDuration(ss[1] + "s")
- if err != nil {
- return nil, err
- }
-
- stats.Age = d
- case fieldBytes:
- if len(ss) < 2 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
- }
- bstats, err := parseNFSBytesStats(ss[1:])
- if err != nil {
- return nil, err
- }
-
- stats.Bytes = *bstats
- case fieldEvents:
- if len(ss) < 2 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
- }
- estats, err := parseNFSEventsStats(ss[1:])
- if err != nil {
- return nil, err
- }
-
- stats.Events = *estats
- case fieldTransport:
- if len(ss) < 3 {
- return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
- }
-
- tstats, err := parseNFSTransportStats(ss[1:], statVersion)
- if err != nil {
- return nil, err
- }
-
- stats.Transport = *tstats
- }
-
-		// When encountering "per-operation statistics", we must break out of
-		// this loop and parse them separately, so that parsing terminates
-		// before reaching another device entry; this is why the check is an
-		// 'if' statement rather than another switch case.
- if ss[0] == fieldPerOpStats {
- break
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- // NFS per-operation stats appear last before the next device entry
- perOpStats, err := parseNFSOperationStats(s)
- if err != nil {
- return nil, err
- }
-
- stats.Operations = perOpStats
-
- return stats, nil
-}
-
-// parseNFSBytesStats parses an NFSBytesStats line using an input set of
-// integer fields.
-func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
- if len(ss) != fieldBytesLen {
- return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
- }
-
- ns := make([]uint64, 0, fieldBytesLen)
- for _, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
-
- return &NFSBytesStats{
- Read: ns[0],
- Write: ns[1],
- DirectRead: ns[2],
- DirectWrite: ns[3],
- ReadTotal: ns[4],
- WriteTotal: ns[5],
- ReadPages: ns[6],
- WritePages: ns[7],
- }, nil
-}
-
-// parseNFSEventsStats parses an NFSEventsStats line using an input set of
-// integer fields.
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
- if len(ss) != fieldEventsLen {
- return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
- }
-
- ns := make([]uint64, 0, fieldEventsLen)
- for _, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
-
- return &NFSEventsStats{
- InodeRevalidate: ns[0],
- DnodeRevalidate: ns[1],
- DataInvalidate: ns[2],
- AttributeInvalidate: ns[3],
- VFSOpen: ns[4],
- VFSLookup: ns[5],
- VFSAccess: ns[6],
- VFSUpdatePage: ns[7],
- VFSReadPage: ns[8],
- VFSReadPages: ns[9],
- VFSWritePage: ns[10],
- VFSWritePages: ns[11],
- VFSGetdents: ns[12],
- VFSSetattr: ns[13],
- VFSFlush: ns[14],
- VFSFsync: ns[15],
- VFSLock: ns[16],
- VFSFileRelease: ns[17],
- CongestionWait: ns[18],
- Truncation: ns[19],
- WriteExtension: ns[20],
- SillyRename: ns[21],
- ShortRead: ns[22],
- ShortWrite: ns[23],
- JukeboxDelay: ns[24],
- PNFSRead: ns[25],
- PNFSWrite: ns[26],
- }, nil
-}
-
-// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
-// additional information about per-operation statistics until an empty
-// line is reached.
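-//
-// Each line holds an operation name followed by at least eight counters, for
-// example (illustrative values):
-//
-// READ: 1298 1298 0 207680 1210292152 6 79386 79407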
-func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
- const (
- // Minimum number of expected fields in each per-operation statistics set
- minFields = 9
- )
-
- var ops []NFSOperationStats
-
- for s.Scan() {
- ss := strings.Fields(string(s.Bytes()))
- if len(ss) == 0 {
-			// We must break when reading a blank line after per-operation stats
-			// so the top-level function can parse the next device entry.
- break
- }
-
- if len(ss) < minFields {
- return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
- }
-
-		// Skip the string operation name; the remaining fields are integers.
- ns := make([]uint64, 0, minFields-1)
- for _, st := range ss[1:] {
- n, err := strconv.ParseUint(st, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns = append(ns, n)
- }
- opStats := NFSOperationStats{
- Operation: strings.TrimSuffix(ss[0], ":"),
- Requests: ns[0],
- Transmissions: ns[1],
- MajorTimeouts: ns[2],
- BytesSent: ns[3],
- BytesReceived: ns[4],
- CumulativeQueueMilliseconds: ns[5],
- CumulativeTotalResponseMilliseconds: ns[6],
- CumulativeTotalRequestMilliseconds: ns[7],
- }
-
- if len(ns) > 8 {
- opStats.Errors = ns[8]
- }
-
- ops = append(ops, opStats)
- }
-
- return ops, s.Err()
-}
-
-// parseNFSTransportStats parses an NFSTransportStats line using an input set of
-// integer fields matched to a specific stats version.
-func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
-	// Extract the protocol field. It is the only string value in the line.
- protocol := ss[0]
- ss = ss[1:]
-
- switch statVersion {
- case statVersion10:
- var expectedLength int
- if protocol == "tcp" {
- expectedLength = fieldTransport10TCPLen
- } else if protocol == "udp" {
- expectedLength = fieldTransport10UDPLen
- } else {
- return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
- }
- if len(ss) != expectedLength {
- return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
- }
- case statVersion11:
- var expectedLength int
- if protocol == "tcp" {
- expectedLength = fieldTransport11TCPLen
- } else if protocol == "udp" {
- expectedLength = fieldTransport11UDPLen
- } else if protocol == "rdma" {
- expectedLength = fieldTransport11RDMAMinLen
- } else {
- return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
- }
- if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
- (protocol == "rdma" && len(ss) < expectedLength) {
- return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
- }
- default:
- return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
- }
-
-	// Allocate enough for v1.1 stats, since zero values for v1.1 stats will be
-	// okay in a v1.0 response. The slice is sized to the largest variant (NFS
-	// over RDMA) plus three extra slots for the field shifts below.
- //
- // Note: slice length must be set to length of v1.1 stats to avoid a panic when
- // only v1.0 stats are present.
- // See: https://github.com/prometheus/node_exporter/issues/571.
- //
- // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
- ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
- for i, s := range ss {
- n, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- ns[i] = n
- }
-
- // The fields differ depending on the transport protocol (TCP or UDP)
- // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
- //
- // For the udp RPC transport there is no connection count, connect idle time,
- // or idle time (fields #3, #4, and #5); all other fields are the same. So
- // we set them to 0 here.
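-	// For example (illustrative), a UDP line parsed as
-	// ns = [port bind sends recvs ...] becomes
-	// ns = [port bind 0 0 0 sends recvs ...],
-	// so the field offsets used in the struct literal below line up for both
-	// protocols.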
- if protocol == "udp" {
- ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
- } else if protocol == "tcp" {
- ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
- } else if protocol == "rdma" {
- ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
- }
-
- return &NFSTransportStats{
- // NFS xprt over tcp or udp
- Protocol: protocol,
- Port: ns[0],
- Bind: ns[1],
- Connect: ns[2],
- ConnectIdleTime: ns[3],
- IdleTimeSeconds: ns[4],
- Sends: ns[5],
- Receives: ns[6],
- BadTransactionIDs: ns[7],
- CumulativeActiveRequests: ns[8],
- CumulativeBacklog: ns[9],
-
- // NFS xprt over tcp or udp
- // And statVersion 1.1
- MaximumRPCSlotsUsed: ns[10],
- CumulativeSendingQueue: ns[11],
- CumulativePendingQueue: ns[12],
-
- // NFS xprt over rdma
- // And stat Version 1.1
- ReadChunkCount: ns[13],
- WriteChunkCount: ns[14],
- ReplyChunkCount: ns[15],
- TotalRdmaRequest: ns[16],
- PullupCopyCount: ns[17],
- HardwayRegisterCount: ns[18],
- FailedMarshalCount: ns[19],
- BadReplyCount: ns[20],
- MrsRecovered: ns[21],
- MrsOrphaned: ns[22],
- MrsAllocated: ns[23],
- EmptySendctxQ: ns[24],
- TotalRdmaReply: ns[25],
- FixupCopyCount: ns[26],
- ReplyWaitsForSend: ns[27],
- LocalInvNeeded: ns[28],
- NomsgCallCount: ns[29],
- BcallCount: ns[30],
- }, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
deleted file mode 100644
index 316df5fbb7..0000000000
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
-// and contains netfilter conntrack statistics at one CPU core.
-type ConntrackStatEntry struct {
- Entries uint64
- Searched uint64
- Found uint64
- New uint64
- Invalid uint64
- Ignore uint64
- Delete uint64
- DeleteList uint64
- Insert uint64
- InsertFailed uint64
- Drop uint64
- EarlyDrop uint64
- SearchRestart uint64
-}
-
-// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
-func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
- return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
-}
-
-// readConntrackStat reads and parses a slice of ConntrackStatEntries from the
-// given filepath.
-func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
- // This file is small and can be read with one syscall.
- b, err := util.ReadFileNoStat(path)
- if err != nil {
- // Do not wrap this error so the caller can detect os.IsNotExist and
- // similar conditions.
- return nil, err
- }
-
- stat, err := parseConntrackStat(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
- }
-
- return stat, nil
-}
-
-// parseConntrackStat reads the contents of a conntrack statistics file and
-// parses a slice of ConntrackStatEntries.
-func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
- var entries []ConntrackStatEntry
-
- scanner := bufio.NewScanner(r)
- scanner.Scan()
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- conntrackEntry, err := parseConntrackStatEntry(fields)
- if err != nil {
- return nil, err
- }
- entries = append(entries, *conntrackEntry)
- }
-
- return entries, nil
-}
-
-// parseConntrackStatEntry parses a ConntrackStatEntry from the given slice of fields.
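-// All fields are hexadecimal; for example, a first field of "00000021"
-// (illustrative) parses to Entries = 33.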
-func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
- entries, err := util.ParseHexUint64s(fields)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
- }
- numEntries := len(entries)
- if numEntries < 16 || numEntries > 17 {
- return nil,
- fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
- }
-
- stats := &ConntrackStatEntry{
- Entries: *entries[0],
- Searched: *entries[1],
- Found: *entries[2],
- New: *entries[3],
- Invalid: *entries[4],
- Ignore: *entries[5],
- Delete: *entries[6],
- DeleteList: *entries[7],
- Insert: *entries[8],
- InsertFailed: *entries[9],
- Drop: *entries[10],
- EarlyDrop: *entries[11],
- }
-
- // Ignore missing search_restart on Linux < 2.6.35.
- if numEntries == 17 {
- stats.SearchRestart = *entries[16]
- }
-
- return stats, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
deleted file mode 100644
index e66208aa05..0000000000
--- a/vendor/github.com/prometheus/procfs/net_dev.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "os"
- "sort"
- "strconv"
- "strings"
-)
-
-// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
-type NetDevLine struct {
- Name string `json:"name"` // The name of the interface.
- RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
- RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
- RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
- RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
- RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
- RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
- RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
- RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
- TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
- TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
- TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
- TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
- TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
- TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
- TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
- TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
-}
-
-// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
-// are interface names.
-type NetDev map[string]NetDevLine
-
-// NetDev returns kernel/system statistics read from /proc/net/dev.
-func (fs FS) NetDev() (NetDev, error) {
- return newNetDev(fs.proc.Path("net/dev"))
-}
-
-// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
-func (p Proc) NetDev() (NetDev, error) {
- return newNetDev(p.path("net/dev"))
-}
-
-// newNetDev creates a new NetDev from the contents of the given file.
-func newNetDev(file string) (NetDev, error) {
- f, err := os.Open(file)
- if err != nil {
- return NetDev{}, err
- }
- defer f.Close()
-
- netDev := NetDev{}
- s := bufio.NewScanner(f)
- for n := 0; s.Scan(); n++ {
- // Skip the 2 header lines.
- if n < 2 {
- continue
- }
-
- line, err := netDev.parseLine(s.Text())
- if err != nil {
- return netDev, err
- }
-
- netDev[line.Name] = *line
- }
-
- return netDev, s.Err()
-}
-
-// parseLine parses a single line from the /proc/net/dev file. Header lines
-// must be filtered prior to calling this method.
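-//
-// An example line (illustrative values):
-//
-// eth0: 12345 100 0 0 0 0 0 0 54321 90 0 0 0 0 0 0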
-func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
- idx := strings.LastIndex(rawLine, ":")
- if idx == -1 {
- return nil, errors.New("invalid net/dev line, missing colon")
- }
- fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
-
- var err error
- line := &NetDevLine{}
-
- // Interface Name
- line.Name = strings.TrimSpace(rawLine[:idx])
- if line.Name == "" {
- return nil, errors.New("invalid net/dev line, empty interface name")
- }
-
- // RX
- line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
- if err != nil {
- return nil, err
- }
- line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
- if err != nil {
- return nil, err
- }
-
- // TX
- line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
- if err != nil {
- return nil, err
- }
- line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
- if err != nil {
- return nil, err
- }
-
- return line, nil
-}
-
-// Total aggregates the values across interfaces and returns a new NetDevLine.
-// The Name field will be a sorted comma separated list of interface names.
-func (netDev NetDev) Total() NetDevLine {
- total := NetDevLine{}
-
- names := make([]string, 0, len(netDev))
- for _, ifc := range netDev {
- names = append(names, ifc.Name)
- total.RxBytes += ifc.RxBytes
- total.RxPackets += ifc.RxPackets
- total.RxErrors += ifc.RxErrors
- total.RxDropped += ifc.RxDropped
- total.RxFIFO += ifc.RxFIFO
- total.RxFrame += ifc.RxFrame
- total.RxCompressed += ifc.RxCompressed
- total.RxMulticast += ifc.RxMulticast
- total.TxBytes += ifc.TxBytes
- total.TxPackets += ifc.TxPackets
- total.TxErrors += ifc.TxErrors
- total.TxDropped += ifc.TxDropped
- total.TxFIFO += ifc.TxFIFO
- total.TxCollisions += ifc.TxCollisions
- total.TxCarrier += ifc.TxCarrier
- total.TxCompressed += ifc.TxCompressed
- }
- sort.Strings(names)
- total.Name = strings.Join(names, ", ")
-
- return total
-}
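-
-// A minimal usage sketch (illustrative; assumes the default /proc mount):
-//
-// fs, err := NewFS(DefaultMountPoint)
-// if err != nil { /* handle error */ }
-// nd, err := fs.NetDev()
-// if err != nil { /* handle error */ }
-// fmt.Printf("total rx bytes: %d\n", nd.Total().RxBytes)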
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
deleted file mode 100644
index b70f1fc7a4..0000000000
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "encoding/hex"
- "fmt"
- "io"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-const (
- // readLimit is used by io.LimitReader while reading the content of the
- // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
- // as each line represents a single used socket.
- // In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
-	// At e.g. 150 bytes per line and the maximum of 65535 sockets,
-	// the reader needs to handle 150 bytes * 65535 =~ 10 MB for a single IP.
- readLimit = 4294967296 // Byte -> 4 GiB
-)
-
-// This contains generic data structures for both udp and tcp sockets.
-type (
- // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
- NetIPSocket []*netIPSocketLine
-
- // NetIPSocketSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetIPSocket it does not collect
- // the parsed lines into a slice.
- NetIPSocketSummary struct {
- // TxQueueLength shows the total queue length of all parsed tx_queue lengths.
- TxQueueLength uint64
- // RxQueueLength shows the total queue length of all parsed rx_queue lengths.
- RxQueueLength uint64
- // UsedSockets shows the total number of parsed lines representing the
- // number of used sockets.
- UsedSockets uint64
-		// Drops shows the total number of dropped packets of all UDP sockets.
- Drops *uint64
- }
-
- // netIPSocketLine represents the fields parsed from a single line
- // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
- // Drops is non-nil for udp{,6}, but nil for tcp{,6}.
- // For the proc file format details, see https://linux.die.net/man/5/proc.
- netIPSocketLine struct {
- Sl uint64
- LocalAddr net.IP
- LocalPort uint64
- RemAddr net.IP
- RemPort uint64
- St uint64
- TxQueue uint64
- RxQueue uint64
- UID uint64
- Inode uint64
- Drops *uint64
- }
-)
-
-func newNetIPSocket(file string) (NetIPSocket, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- var netIPSocket NetIPSocket
- isUDP := strings.Contains(file, "udp")
-
- lr := io.LimitReader(f, readLimit)
- s := bufio.NewScanner(lr)
- s.Scan() // skip first line with headers
- for s.Scan() {
- fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields, isUDP)
- if err != nil {
- return nil, err
- }
- netIPSocket = append(netIPSocket, line)
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
- return netIPSocket, nil
-}
-
-// newNetIPSocketSummary creates a new NetIPSocketSummary from the contents of the given file.
-func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- var netIPSocketSummary NetIPSocketSummary
- var udpPacketDrops uint64
- isUDP := strings.Contains(file, "udp")
-
- lr := io.LimitReader(f, readLimit)
- s := bufio.NewScanner(lr)
- s.Scan() // skip first line with headers
- for s.Scan() {
- fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields, isUDP)
- if err != nil {
- return nil, err
- }
- netIPSocketSummary.TxQueueLength += line.TxQueue
- netIPSocketSummary.RxQueueLength += line.RxQueue
- netIPSocketSummary.UsedSockets++
- if isUDP {
- udpPacketDrops += *line.Drops
- netIPSocketSummary.Drops = &udpPacketDrops
- }
- }
- if err := s.Err(); err != nil {
- return nil, err
- }
- return &netIPSocketSummary, nil
-}
-
-// In the /proc/net/{t,u}dp{,6} files the kernel writes addresses as hexadecimal
-// in host (little-endian) byte order: the four IPv4 bytes appear reversed, and
-// an IPv6 address is four 4-byte words, each with its bytes reversed.
-
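-// For example (a well-known encoding), the IPv4 loopback address 127.0.0.1
-// appears as "0100007F": hex-decoding yields the bytes {0x01, 0x00, 0x00, 0x7F},
-// which parseIP reverses to 127.0.0.1.
-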
-func parseIP(hexIP string) (net.IP, error) {
- var byteIP []byte
- byteIP, err := hex.DecodeString(hexIP)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
- }
- switch len(byteIP) {
- case 4:
- return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
- case 16:
- i := net.IP{
- byteIP[3], byteIP[2], byteIP[1], byteIP[0],
- byteIP[7], byteIP[6], byteIP[5], byteIP[4],
- byteIP[11], byteIP[10], byteIP[9], byteIP[8],
- byteIP[15], byteIP[14], byteIP[13], byteIP[12],
- }
- return i, nil
- default:
- return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
- }
-}
-
-// parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
- line := &netIPSocketLine{}
- if len(fields) < 10 {
- return nil, fmt.Errorf(
- "%w: Less than 10 columns found %q",
- ErrFileParse,
- strings.Join(fields, " "),
- )
- }
- var err error // parse error
-
- // sl
- s := strings.Split(fields[0], ":")
- if len(s) != 2 {
- return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
- }
-
- if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
- }
- // local_address
- l := strings.Split(fields[1], ":")
- if len(l) != 2 {
- return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
- }
- if line.LocalAddr, err = parseIP(l[0]); err != nil {
- return nil, err
- }
- if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
- }
-
- // remote_address
- r := strings.Split(fields[2], ":")
- if len(r) != 2 {
- return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
- }
- if line.RemAddr, err = parseIP(r[0]); err != nil {
- return nil, err
- }
- if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
- }
-
- // st
- if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
- }
-
- // tx_queue and rx_queue
- q := strings.Split(fields[4], ":")
- if len(q) != 2 {
- return nil, fmt.Errorf(
- "%w: Missing colon for tx/rx queues in socket line %q",
- ErrFileParse,
- fields[4],
- )
- }
- if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
- }
- if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
- }
-
- // uid
- if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
- }
-
- // inode
- if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
- return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
- }
-
- // drops
- if isUDP {
- drops, err := strconv.ParseUint(fields[12], 0, 64)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
- }
- line.Drops = &drops
- }
-
- return line, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
deleted file mode 100644
index b6c77b709f..0000000000
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// NetProtocolStats stores the contents from /proc/net/protocols.
-type NetProtocolStats map[string]NetProtocolStatLine
-
-// NetProtocolStatLine contains a single line parsed from /proc/net/protocols.
-// The first eight columns are parsed into named fields; the remaining columns
-// record the set of capabilities for each protocol and are parsed into
-// Capabilities.
-type NetProtocolStatLine struct {
- Name string // 0 The name of the protocol
- Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
- Sockets int64 // 2 Number of sockets in use by this protocol
- Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
-	Pressure     int    // 4 Either yes (1), no (0), or NI (-1, not implemented); NI is treated as not experiencing memory pressure.
- MaxHeader uint64 // 5 Protocol specific max header size
- Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
- ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
- Capabilities NetProtocolCapabilities
-}
-
-// NetProtocolCapabilities contains a list of capabilities for each protocol.
-type NetProtocolCapabilities struct {
- Close bool // 8
- Connect bool // 9
- Disconnect bool // 10
- Accept bool // 11
- IoCtl bool // 12
- Init bool // 13
- Destroy bool // 14
- Shutdown bool // 15
- SetSockOpt bool // 16
- GetSockOpt bool // 17
- SendMsg bool // 18
- RecvMsg bool // 19
- SendPage bool // 20
- Bind bool // 21
- BacklogRcv bool // 22
- Hash bool // 23
- UnHash bool // 24
- GetPort bool // 25
- EnterMemoryPressure bool // 26
-}
-
-// NetProtocols reads stats from /proc/net/protocols and returns a map of
-// NetProtocolStatLine entries. As of this writing no official Linux
-// documentation exists; however, the source is fairly self-explanatory and the
-// format appears stable since its introduction in 2.6.12-rc2.
-// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
-// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
-func (fs FS) NetProtocols() (NetProtocolStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
- if err != nil {
- return NetProtocolStats{}, err
- }
- return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
-}
-
-func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
- nps := NetProtocolStats{}
-
- // Skip the header line
- s.Scan()
-
- for s.Scan() {
- line, err := nps.parseLine(s.Text())
- if err != nil {
- return NetProtocolStats{}, err
- }
-
- nps[line.Name] = *line
- }
-	return nps, s.Err()
-}
-
-func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
- line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
- var err error
- const enabled = "yes"
- const disabled = "no"
-
- fields := strings.Fields(rawLine)
- line.Name = fields[0]
- line.Size, err = strconv.ParseUint(fields[1], 10, 64)
- if err != nil {
- return nil, err
- }
- line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
- if err != nil {
- return nil, err
- }
- line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- if fields[4] == enabled {
- line.Pressure = 1
- } else if fields[4] == disabled {
- line.Pressure = 0
- } else {
- line.Pressure = -1
- }
- line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- if fields[6] == enabled {
- line.Slab = true
- } else if fields[6] == disabled {
- line.Slab = false
- } else {
- return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
- }
- line.ModuleName = fields[7]
-
- err = line.Capabilities.parseCapabilities(fields[8:])
- if err != nil {
- return nil, err
- }
-
- return line, nil
-}
-
-func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
-	// The capabilities are all bools, so we can loop over them to map each one.
- capabilityFields := [...]*bool{
- &pc.Close,
- &pc.Connect,
- &pc.Disconnect,
- &pc.Accept,
- &pc.IoCtl,
- &pc.Init,
- &pc.Destroy,
- &pc.Shutdown,
- &pc.SetSockOpt,
- &pc.GetSockOpt,
- &pc.SendMsg,
- &pc.RecvMsg,
- &pc.SendPage,
- &pc.Bind,
- &pc.BacklogRcv,
- &pc.Hash,
- &pc.UnHash,
- &pc.GetPort,
- &pc.EnterMemoryPressure,
- }
-
- for i := 0; i < len(capabilities); i++ {
- if capabilities[i] == "y" {
- *capabilityFields[i] = true
- } else if capabilities[i] == "n" {
- *capabilityFields[i] = false
- } else {
- return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
- }
- }
- return nil
-}
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
deleted file mode 100644
index deb7029fe1..0000000000
--- a/vendor/github.com/prometheus/procfs/net_route.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-const (
- blackholeRepresentation string = "*"
- blackholeIfaceName string = "blackhole"
- routeLineColumns int = 11
-)
-
-// A NetRouteLine represents one line from net/route.
-type NetRouteLine struct {
- Iface string
- Destination uint32
- Gateway uint32
- Flags uint32
- RefCnt uint32
- Use uint32
- Metric uint32
- Mask uint32
- MTU uint32
- Window uint32
- IRTT uint32
-}
-
-func (fs FS) NetRoute() ([]NetRouteLine, error) {
- return readNetRoute(fs.proc.Path("net", "route"))
-}
-
-func readNetRoute(path string) ([]NetRouteLine, error) {
- b, err := util.ReadFileNoStat(path)
- if err != nil {
- return nil, err
- }
-
- routelines, err := parseNetRoute(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
- }
- return routelines, nil
-}
-
-func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
- var routelines []NetRouteLine
-
- scanner := bufio.NewScanner(r)
- scanner.Scan()
- for scanner.Scan() {
- fields := strings.Fields(scanner.Text())
- routeline, err := parseNetRouteLine(fields)
- if err != nil {
- return nil, err
- }
- routelines = append(routelines, *routeline)
- }
- return routelines, nil
-}
-
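-// parseNetRouteLine parses the 11 whitespace-separated fields of one route
-// line (illustrative note: destination, gateway, and mask are little-endian
-// hexadecimal, so a 255.255.255.0 mask appears as "00FFFFFF"; the remaining
-// fields are decimal).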
-func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
- if len(fields) != routeLineColumns {
- return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
- }
- iface := fields[0]
- if iface == blackholeRepresentation {
- iface = blackholeIfaceName
- }
- destination, err := strconv.ParseUint(fields[1], 16, 32)
- if err != nil {
- return nil, err
- }
- gateway, err := strconv.ParseUint(fields[2], 16, 32)
- if err != nil {
- return nil, err
- }
- flags, err := strconv.ParseUint(fields[3], 10, 32)
- if err != nil {
- return nil, err
- }
- refcnt, err := strconv.ParseUint(fields[4], 10, 32)
- if err != nil {
- return nil, err
- }
- use, err := strconv.ParseUint(fields[5], 10, 32)
- if err != nil {
- return nil, err
- }
- metric, err := strconv.ParseUint(fields[6], 10, 32)
- if err != nil {
- return nil, err
- }
- mask, err := strconv.ParseUint(fields[7], 16, 32)
- if err != nil {
- return nil, err
- }
- mtu, err := strconv.ParseUint(fields[8], 10, 32)
- if err != nil {
- return nil, err
- }
- window, err := strconv.ParseUint(fields[9], 10, 32)
- if err != nil {
- return nil, err
- }
- irtt, err := strconv.ParseUint(fields[10], 10, 32)
- if err != nil {
- return nil, err
- }
- routeline := &NetRouteLine{
- Iface: iface,
- Destination: uint32(destination),
- Gateway: uint32(gateway),
- Flags: uint32(flags),
- RefCnt: uint32(refcnt),
- Use: uint32(use),
- Metric: uint32(metric),
- Mask: uint32(mask),
- MTU: uint32(mtu),
- Window: uint32(window),
- IRTT: uint32(irtt),
- }
- return routeline, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
deleted file mode 100644
index fae62b13d9..0000000000
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
-// respectively.
-type NetSockstat struct {
- // Used is non-nil for IPv4 sockstat results, but nil for IPv6.
- Used *int
- Protocols []NetSockstatProtocol
-}
-
-// A NetSockstatProtocol contains statistics about a given socket protocol.
-// Pointer fields indicate that the value may or may not be present on any
-// given protocol.
-type NetSockstatProtocol struct {
- Protocol string
- InUse int
- Orphan *int
- TW *int
- Alloc *int
- Mem *int
- Memory *int
-}
-
-// NetSockstat retrieves IPv4 socket statistics.
-func (fs FS) NetSockstat() (*NetSockstat, error) {
- return readSockstat(fs.proc.Path("net", "sockstat"))
-}
-
-// NetSockstat6 retrieves IPv6 socket statistics.
-//
-// If IPv6 is disabled on this kernel, the returned error can be checked with
-// os.IsNotExist.
-func (fs FS) NetSockstat6() (*NetSockstat, error) {
- return readSockstat(fs.proc.Path("net", "sockstat6"))
-}
-
-// readSockstat opens and parses a NetSockstat from the input file.
-func readSockstat(name string) (*NetSockstat, error) {
- // This file is small and can be read with one syscall.
- b, err := util.ReadFileNoStat(name)
- if err != nil {
- // Do not wrap this error so the caller can detect os.IsNotExist and
- // similar conditions.
- return nil, err
- }
-
- stat, err := parseSockstat(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
- }
-
- return stat, nil
-}
-
-// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
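-//
-// Input lines look like (illustrative values):
-//
-// sockets: used 1602
-// TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22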
-func parseSockstat(r io.Reader) (*NetSockstat, error) {
- var stat NetSockstat
- s := bufio.NewScanner(r)
- for s.Scan() {
- // Expect a minimum of a protocol and one key/value pair.
- fields := strings.Split(s.Text(), " ")
- if len(fields) < 3 {
- return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
- }
-
- // The remaining fields are key/value pairs.
- kvs, err := parseSockstatKVs(fields[1:])
- if err != nil {
- return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
- }
-
- // The first field is the protocol. We must trim its colon suffix.
- proto := strings.TrimSuffix(fields[0], ":")
- switch proto {
- case "sockets":
- // Special case: IPv4 has a sockets "used" key/value pair that we
- // embed at the top level of the structure.
- used := kvs["used"]
- stat.Used = &used
- default:
- // Parse all other lines as individual protocols.
- nsp := parseSockstatProtocol(kvs)
- nsp.Protocol = proto
- stat.Protocols = append(stat.Protocols, nsp)
- }
- }
-
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- return &stat, nil
-}
-
-// parseSockstatKVs parses a string slice into a map of key/value pairs.
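-//
-// For example (illustrative), {"inuse", "35", "orphan", "0"} parses to
-// map[string]int{"inuse": 35, "orphan": 0}.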
-func parseSockstatKVs(kvs []string) (map[string]int, error) {
- if len(kvs)%2 != 0 {
- return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
- }
-
- // Iterate two values at a time to gather key/value pairs.
- out := make(map[string]int, len(kvs)/2)
- for i := 0; i < len(kvs); i += 2 {
- vp := util.NewValueParser(kvs[i+1])
- out[kvs[i]] = vp.Int()
-
- if err := vp.Err(); err != nil {
- return nil, err
- }
- }
-
- return out, nil
-}
-
-// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
-func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
- var nsp NetSockstatProtocol
- for k, v := range kvs {
- // Capture the range variable to ensure we get unique pointers for
- // each of the optional fields.
- v := v
- switch k {
- case "inuse":
- nsp.InUse = v
- case "orphan":
- nsp.Orphan = &v
- case "tw":
- nsp.TW = &v
- case "alloc":
- nsp.Alloc = &v
- case "mem":
- nsp.Mem = &v
- case "memory":
- nsp.Memory = &v
- }
- }
-
- return nsp
-}
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
deleted file mode 100644
index 71c8059f4d..0000000000
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// For the proc file format details,
-// See:
-// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
-// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
-// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
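-//
-// Every column is a hexadecimal value; each row describes one CPU, and newer
-// kernels append additional columns, so parseSoftnet below keys off the row
-// width (summary of the references above).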
-
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
-type SoftnetStat struct {
- // Number of processed packets.
- Processed uint32
- // Number of dropped packets.
- Dropped uint32
- // Number of times processing packets ran out of quota.
- TimeSqueezed uint32
-	// Number of collisions that occurred while obtaining the device lock during transmission.
-	CPUCollision uint32
-	// Number of times the CPU was woken up (received_rps).
-	ReceivedRps uint32
-	// Number of times the flow limit has been reached.
-	FlowLimitCount uint32
- // Softnet backlog status.
- SoftnetBacklogLen uint32
- // CPU id owning this softnet_data.
- Index uint32
-	// Width is the number of columns parsed from this row.
- Width int
-}
-
-var softNetProcFile = "net/softnet_stat"
-
-// NetSoftnetStat reads data from /proc/net/softnet_stat.
-func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
- if err != nil {
- return nil, err
- }
-
- entries, err := parseSoftnet(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
- }
-
- return entries, nil
-}
-
-func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
- const minColumns = 9
-
- s := bufio.NewScanner(r)
-
- var stats []SoftnetStat
- cpuIndex := 0
- for s.Scan() {
- columns := strings.Fields(s.Text())
- width := len(columns)
- softnetStat := SoftnetStat{}
-
- if width < minColumns {
- return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
- }
-
- // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
- if width >= minColumns {
- us, err := parseHexUint32s(columns[0:9])
- if err != nil {
- return nil, err
- }
-
- softnetStat.Processed = us[0]
- softnetStat.Dropped = us[1]
- softnetStat.TimeSqueezed = us[2]
- softnetStat.CPUCollision = us[8]
- }
-
- // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
- if width >= 10 {
- us, err := parseHexUint32s(columns[9:10])
- if err != nil {
- return nil, err
- }
-
- softnetStat.ReceivedRps = us[0]
- }
-
- // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
- if width >= 11 {
- us, err := parseHexUint32s(columns[10:11])
- if err != nil {
- return nil, err
- }
-
- softnetStat.FlowLimitCount = us[0]
- }
-
- // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
- if width >= 13 {
- us, err := parseHexUint32s(columns[11:13])
- if err != nil {
- return nil, err
- }
-
- softnetStat.SoftnetBacklogLen = us[0]
- softnetStat.Index = us[1]
- } else {
- // For older kernels, create the Index based on the scan line number.
- softnetStat.Index = uint32(cpuIndex)
- }
- softnetStat.Width = width
- stats = append(stats, softnetStat)
- cpuIndex++
- }
-
- return stats, nil
-}
-
-func parseHexUint32s(ss []string) ([]uint32, error) {
- us := make([]uint32, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 16, 32)
- if err != nil {
- return nil, err
- }
-
- us = append(us, uint32(u))
- }
-
- return us, nil
-}
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
deleted file mode 100644
index 5277629557..0000000000
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-type (
- // NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
- NetTCP []*netIPSocketLine
-
- // NetTCPSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetTCP it does not collect
- // the parsed lines into a slice.
- NetTCPSummary NetIPSocketSummary
-)
-
-// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
-// read from /proc/net/tcp.
-func (fs FS) NetTCP() (NetTCP, error) {
- return newNetTCP(fs.proc.Path("net/tcp"))
-}
-
-// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
-// read from /proc/net/tcp6.
-func (fs FS) NetTCP6() (NetTCP, error) {
- return newNetTCP(fs.proc.Path("net/tcp6"))
-}
-
-// NetTCPSummary returns already computed statistics like the total queue lengths
-// for TCP datagrams read from /proc/net/tcp.
-func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
- return newNetTCPSummary(fs.proc.Path("net/tcp"))
-}
-
-// NetTCP6Summary returns already computed statistics like the total queue lengths
-// for TCP datagrams read from /proc/net/tcp6.
-func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
- return newNetTCPSummary(fs.proc.Path("net/tcp6"))
-}
-
-// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
-func newNetTCP(file string) (NetTCP, error) {
- n, err := newNetIPSocket(file)
- n1 := NetTCP(n)
- return n1, err
-}
-
-func newNetTCPSummary(file string) (*NetTCPSummary, error) {
- n, err := newNetIPSocketSummary(file)
- if n == nil {
- return nil, err
- }
- n1 := NetTCPSummary(*n)
- return &n1, err
-}
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
deleted file mode 100644
index 13994c1782..0000000000
--- a/vendor/github.com/prometheus/procfs/net_tls_stat.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2023 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// TLSStat struct represents data in /proc/net/tls_stat.
-// See https://docs.kernel.org/networking/tls.html#statistics
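-// Each line of the file holds one counter name and a decimal value,
-// e.g. "TlsCurrTxSw 3" (illustrative value).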
-type TLSStat struct {
- // number of TX sessions currently installed where host handles cryptography
- TLSCurrTxSw int
- // number of RX sessions currently installed where host handles cryptography
- TLSCurrRxSw int
- // number of TX sessions currently installed where NIC handles cryptography
- TLSCurrTxDevice int
- // number of RX sessions currently installed where NIC handles cryptography
- TLSCurrRxDevice int
-	// number of TX sessions opened with host cryptography
-	TLSTxSw int
-	// number of RX sessions opened with host cryptography
-	TLSRxSw int
- // number of TX sessions opened with NIC cryptography
- TLSTxDevice int
- // number of RX sessions opened with NIC cryptography
- TLSRxDevice int
- // record decryption failed (e.g. due to incorrect authentication tag)
- TLSDecryptError int
- // number of RX resyncs sent to NICs handling cryptography
- TLSRxDeviceResync int
- // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
- TLSDecryptRetry int
- // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
- TLSRxNoPadViolation int
-}
-
-// NewTLSStat reads the tls_stat statistics.
-func NewTLSStat() (TLSStat, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return TLSStat{}, err
- }
-
- return fs.NewTLSStat()
-}
-
-// NewTLSStat reads the tls_stat statistics.
-func (fs FS) NewTLSStat() (TLSStat, error) {
- file, err := os.Open(fs.proc.Path("net/tls_stat"))
- if err != nil {
- return TLSStat{}, err
- }
- defer file.Close()
-
- var (
- tlsstat = TLSStat{}
- s = bufio.NewScanner(file)
- )
-
- for s.Scan() {
- fields := strings.Fields(s.Text())
-
- if len(fields) != 2 {
- return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
- }
-
- name := fields[0]
- value, err := strconv.Atoi(fields[1])
- if err != nil {
- return TLSStat{}, err
- }
-
- switch name {
- case "TlsCurrTxSw":
- tlsstat.TLSCurrTxSw = value
- case "TlsCurrRxSw":
- tlsstat.TLSCurrRxSw = value
- case "TlsCurrTxDevice":
- tlsstat.TLSCurrTxDevice = value
- case "TlsCurrRxDevice":
- tlsstat.TLSCurrRxDevice = value
- case "TlsTxSw":
- tlsstat.TLSTxSw = value
- case "TlsRxSw":
- tlsstat.TLSRxSw = value
- case "TlsTxDevice":
- tlsstat.TLSTxDevice = value
- case "TlsRxDevice":
- tlsstat.TLSRxDevice = value
- case "TlsDecryptError":
- tlsstat.TLSDecryptError = value
- case "TlsRxDeviceResync":
- tlsstat.TLSRxDeviceResync = value
- case "TlsDecryptRetry":
- tlsstat.TLSDecryptRetry = value
- case "TlsRxNoPadViolation":
- tlsstat.TLSRxNoPadViolation = value
- }
-
- }
-
- return tlsstat, s.Err()
-}
diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go
deleted file mode 100644
index 9ac3daf2d4..0000000000
--- a/vendor/github.com/prometheus/procfs/net_udp.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-type (
- // NetUDP represents the contents of /proc/net/udp{,6} file without the header.
- NetUDP []*netIPSocketLine
-
- // NetUDPSummary provides already computed values like the total queue lengths or
- // the total number of used sockets. In contrast to NetUDP it does not collect
- // the parsed lines into a slice.
- NetUDPSummary NetIPSocketSummary
-)
-
-// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
-// read from /proc/net/udp.
-func (fs FS) NetUDP() (NetUDP, error) {
- return newNetUDP(fs.proc.Path("net/udp"))
-}
-
-// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
-// read from /proc/net/udp6.
-func (fs FS) NetUDP6() (NetUDP, error) {
- return newNetUDP(fs.proc.Path("net/udp6"))
-}
-
-// NetUDPSummary returns already computed statistics like the total queue lengths
-// for UDP datagrams read from /proc/net/udp.
-func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
- return newNetUDPSummary(fs.proc.Path("net/udp"))
-}
-
-// NetUDP6Summary returns already computed statistics like the total queue lengths
-// for UDP datagrams read from /proc/net/udp6.
-func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
- return newNetUDPSummary(fs.proc.Path("net/udp6"))
-}
-
-// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
-func newNetUDP(file string) (NetUDP, error) {
- n, err := newNetIPSocket(file)
- n1 := NetUDP(n)
- return n1, err
-}
-
-func newNetUDPSummary(file string) (*NetUDPSummary, error) {
- n, err := newNetIPSocketSummary(file)
- if n == nil {
- return nil, err
- }
- n1 := NetUDPSummary(*n)
- return &n1, err
-}
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
deleted file mode 100644
index d868cebdaa..0000000000
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-// For the proc file format details,
-// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
-// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
-
-// Constants for the various /proc/net/unix enumerations.
-// TODO: match against x/sys/unix or similar?
-const (
- netUnixTypeStream = 1
- netUnixTypeDgram = 2
- netUnixTypeSeqpacket = 5
-
- netUnixFlagDefault = 0
- netUnixFlagListen = 1 << 16
-
- netUnixStateUnconnected = 1
- netUnixStateConnecting = 2
- netUnixStateConnected = 3
- netUnixStateDisconnected = 4
-)
-
-// NetUNIXType is the type of the type field.
-type NetUNIXType uint64
-
-// NetUNIXFlags is the type of the flags field.
-type NetUNIXFlags uint64
-
-// NetUNIXState is the type of the state field.
-type NetUNIXState uint64
-
-// NetUNIXLine represents a line of /proc/net/unix.
-type NetUNIXLine struct {
- KernelPtr string
- RefCount uint64
- Protocol uint64
- Flags NetUNIXFlags
- Type NetUNIXType
- State NetUNIXState
- Inode uint64
- Path string
-}
-
-// NetUNIX holds the data read from /proc/net/unix.
-type NetUNIX struct {
- Rows []*NetUNIXLine
-}
-
-// NetUNIX returns data read from /proc/net/unix.
-func (fs FS) NetUNIX() (*NetUNIX, error) {
- return readNetUNIX(fs.proc.Path("net/unix"))
-}
-
-// readNetUNIX reads data in /proc/net/unix format from the specified file.
-func readNetUNIX(file string) (*NetUNIX, error) {
- // This file could be quite large and a streaming read is desirable versus
- // reading the entire contents at once.
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseNetUNIX(f)
-}
-
-// parseNetUNIX creates a NetUnix structure from the incoming stream.
-func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
- // Begin scanning by checking for the existence of Inode.
- s := bufio.NewScanner(r)
- s.Scan()
-
-	// According to the proc(5) man page the header does not contain an
-	// Inode field, but in practice it does. This code works for both cases.
- hasInode := strings.Contains(s.Text(), "Inode")
-
- // Expect a minimum number of fields, but Inode and Path are optional:
- // Num RefCount Protocol Flags Type St Inode Path
- minFields := 6
- if hasInode {
- minFields++
- }
-
- var nu NetUNIX
- for s.Scan() {
- line := s.Text()
- item, err := nu.parseLine(line, hasInode, minFields)
- if err != nil {
- return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
- }
-
- nu.Rows = append(nu.Rows, item)
- }
-
- if err := s.Err(); err != nil {
- return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
- }
-
- return &nu, nil
-}
-
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
- fields := strings.Fields(line)
-
- l := len(fields)
- if l < min {
- return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
- }
-
- // Field offsets are as follows:
- // Num RefCount Protocol Flags Type St Inode Path
-
- kernelPtr := strings.TrimSuffix(fields[0], ":")
-
- users, err := u.parseUsers(fields[1])
- if err != nil {
- return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
- }
-
- flags, err := u.parseFlags(fields[3])
- if err != nil {
- return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
- }
-
- typ, err := u.parseType(fields[4])
- if err != nil {
- return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
- }
-
- state, err := u.parseState(fields[5])
- if err != nil {
- return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
- }
-
- var inode uint64
- if hasInode {
- inode, err = u.parseInode(fields[6])
- if err != nil {
- return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
- }
- }
-
- n := &NetUNIXLine{
- KernelPtr: kernelPtr,
- RefCount: users,
- Type: typ,
- Flags: flags,
- State: state,
- Inode: inode,
- }
-
- // Path field is optional.
- if l > min {
- // Path occurs at either index 6 or 7 depending on whether inode is
- // already present.
- pathIdx := 7
- if !hasInode {
- pathIdx--
- }
-
- n.Path = fields[pathIdx]
- }
-
- return n, nil
-}
-
-func (u NetUNIX) parseUsers(s string) (uint64, error) {
- return strconv.ParseUint(s, 16, 32)
-}
-
-func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
- typ, err := strconv.ParseUint(s, 16, 16)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXType(typ), nil
-}
-
-func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
- flags, err := strconv.ParseUint(s, 16, 32)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXFlags(flags), nil
-}
-
-func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
- st, err := strconv.ParseInt(s, 16, 8)
- if err != nil {
- return 0, err
- }
-
- return NetUNIXState(st), nil
-}
-
-func (u NetUNIX) parseInode(s string) (uint64, error) {
- return strconv.ParseUint(s, 10, 64)
-}
-
-func (t NetUNIXType) String() string {
- switch t {
- case netUnixTypeStream:
- return "stream"
- case netUnixTypeDgram:
- return "dgram"
- case netUnixTypeSeqpacket:
- return "seqpacket"
- }
- return "unknown"
-}
-
-func (f NetUNIXFlags) String() string {
- switch f {
- case netUnixFlagListen:
- return "listen"
- default:
- return "default"
- }
-}
-
-func (s NetUNIXState) String() string {
- switch s {
- case netUnixStateUnconnected:
- return "unconnected"
- case netUnixStateConnecting:
- return "connecting"
- case netUnixStateConnected:
- return "connected"
- case netUnixStateDisconnected:
- return "disconnected"
- }
- return "unknown"
-}
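
A usage sketch for the removed `NetUNIX` API, counting listening stream sockets with the `String` helpers defined above (assumes a Linux host with the upstream package importable):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	nu, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}

	// Count listening SOCK_STREAM sockets using the String helpers above.
	listeners := 0
	for _, row := range nu.Rows {
		if row.Type.String() == "stream" && row.Flags.String() == "listen" {
			listeners++
		}
	}
	fmt.Printf("%d unix stream listeners out of %d sockets\n", listeners, len(nu.Rows))
}
```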
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
deleted file mode 100644
index 7c597bc870..0000000000
--- a/vendor/github.com/prometheus/procfs/net_wireless.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2023 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Wireless models the content of /proc/net/wireless.
-type Wireless struct {
- Name string
-
- // Status is the current 4-digit hex value status of the interface.
- Status uint64
-
- // QualityLink is the link quality.
- QualityLink int
-
- // QualityLevel is the signal gain (dBm).
- QualityLevel int
-
- // QualityNoise is the signal noise baseline (dBm).
- QualityNoise int
-
- // DiscardedNwid is the number of discarded packets with wrong nwid/essid.
- DiscardedNwid int
-
- // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
- DiscardedCrypt int
-
- // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
- DiscardedFrag int
-
- // DiscardedRetry is the number of discarded packets that reached max MAC retries.
- DiscardedRetry int
-
- // DiscardedMisc is the number of discarded packets for other reasons.
- DiscardedMisc int
-
- // MissedBeacon is the number of missed beacons/superframe.
- MissedBeacon int
-}
-
-// Wireless returns kernel wireless statistics.
-func (fs FS) Wireless() ([]*Wireless, error) {
- b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
- if err != nil {
- return nil, err
- }
-
- m, err := parseWireless(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
- }
-
- return m, nil
-}
-
-// parseWireless parses the contents of /proc/net/wireless.
-/*
-Inter-| sta-| Quality | Discarded packets | Missed | WE
-face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
- eth1: 0000 5. -256. -10. 0 1 0 3 0 0
- eth2: 0000 5. -256. -20. 0 2 0 4 0 0
-*/
-func parseWireless(r io.Reader) ([]*Wireless, error) {
- var (
- interfaces []*Wireless
- scanner = bufio.NewScanner(r)
- )
-
- for n := 0; scanner.Scan(); n++ {
- // Skip the 2 header lines.
- if n < 2 {
- continue
- }
-
- line := scanner.Text()
-
- parts := strings.Split(line, ":")
- if len(parts) != 2 {
- return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
- }
-
- name := strings.TrimSpace(parts[0])
- stats := strings.Fields(parts[1])
-
- if len(stats) < 10 {
- return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
- }
-
- status, err := strconv.ParseUint(stats[0], 16, 16)
- if err != nil {
- return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
- }
-
-	qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, stats[1], err)
-	}
-
-	qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Quality:level as integer %q: %w", ErrFileParse, stats[2], err)
-	}
-
-	qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Quality:noise as integer %q: %w", ErrFileParse, stats[3], err)
-	}
-
-	dnwid, err := strconv.Atoi(stats[4])
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Discarded:nwid as integer %q: %w", ErrFileParse, stats[4], err)
-	}
-
-	dcrypt, err := strconv.Atoi(stats[5])
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Discarded:crypt as integer %q: %w", ErrFileParse, stats[5], err)
-	}
-
-	dfrag, err := strconv.Atoi(stats[6])
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Discarded:frag as integer %q: %w", ErrFileParse, stats[6], err)
-	}
-
-	dretry, err := strconv.Atoi(stats[7])
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Discarded:retry as integer %q: %w", ErrFileParse, stats[7], err)
-	}
-
-	dmisc, err := strconv.Atoi(stats[8])
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Discarded:misc as integer %q: %w", ErrFileParse, stats[8], err)
-	}
-
-	mbeacon, err := strconv.Atoi(stats[9])
-	if err != nil {
-		return nil, fmt.Errorf("%w: parse Missed:beacon as integer %q: %w", ErrFileParse, stats[9], err)
-	}
-
- w := &Wireless{
- Name: name,
- Status: status,
- QualityLink: qlink,
- QualityLevel: qlevel,
- QualityNoise: qnoise,
- DiscardedNwid: dnwid,
- DiscardedCrypt: dcrypt,
- DiscardedFrag: dfrag,
- DiscardedRetry: dretry,
- DiscardedMisc: dmisc,
- MissedBeacon: mbeacon,
- }
-
- interfaces = append(interfaces, w)
- }
-
- if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
- }
-
- return interfaces, nil
-}
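
A short sketch of how `fs.Wireless()` was consumed; the call fails on hosts without `/proc/net/wireless`, so the error path matters in practice:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	ifaces, err := fs.Wireless()
	if err != nil {
		log.Fatal(err) // fails on hosts without /proc/net/wireless
	}

	for _, w := range ifaces {
		// QualityLevel/QualityNoise are signal gain and noise floor in dBm.
		fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm missed_beacons=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}
```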
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
deleted file mode 100644
index 932ef20468..0000000000
--- a/vendor/github.com/prometheus/procfs/net_xfrm.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2017 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// XfrmStat models the contents of /proc/net/xfrm_stat.
-type XfrmStat struct {
- // All errors which are not matched by other
- XfrmInError int
- // No buffer is left
- XfrmInBufferError int
- // Header Error
- XfrmInHdrError int
- // No state found
- // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
- XfrmInNoStates int
- // Transformation protocol specific error
- // e.g. SA Key is wrong
- XfrmInStateProtoError int
- // Transformation mode specific error
- XfrmInStateModeError int
- // Sequence error
- // e.g. sequence number is out of window
- XfrmInStateSeqError int
- // State is expired
- XfrmInStateExpired int
- // State has mismatch option
- // e.g. UDP encapsulation type is mismatched
- XfrmInStateMismatch int
- // State is invalid
- XfrmInStateInvalid int
- // No matching template for states
- // e.g. Inbound SAs are correct but SP rule is wrong
- XfrmInTmplMismatch int
- // No policy is found for states
- // e.g. Inbound SAs are correct but no SP is found
- XfrmInNoPols int
- // Policy discards
- XfrmInPolBlock int
- // Policy error
- XfrmInPolError int
- // All errors which are not matched by others
- XfrmOutError int
- // Bundle generation error
- XfrmOutBundleGenError int
- // Bundle check error
- XfrmOutBundleCheckError int
- // No state was found
- XfrmOutNoStates int
- // Transformation protocol specific error
- XfrmOutStateProtoError int
-	// Transformation mode specific error
- XfrmOutStateModeError int
- // Sequence error
-	// i.e. sequence number overflow
- XfrmOutStateSeqError int
- // State is expired
- XfrmOutStateExpired int
-	// Policy discards
- XfrmOutPolBlock int
- // Policy is dead
- XfrmOutPolDead int
-	// Policy error
- XfrmOutPolError int
- // Forward routing of a packet is not allowed
- XfrmFwdHdrError int
- // State is invalid, perhaps expired
- XfrmOutStateInvalid int
- // State hasn’t been fully acquired before use
- XfrmAcquireError int
-}
-
-// NewXfrmStat reads the xfrm_stat statistics.
-func NewXfrmStat() (XfrmStat, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return XfrmStat{}, err
- }
-
- return fs.NewXfrmStat()
-}
-
-// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
-func (fs FS) NewXfrmStat() (XfrmStat, error) {
- file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
- if err != nil {
- return XfrmStat{}, err
- }
- defer file.Close()
-
- var (
- x = XfrmStat{}
- s = bufio.NewScanner(file)
- )
-
- for s.Scan() {
- fields := strings.Fields(s.Text())
-
- if len(fields) != 2 {
- return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
- }
-
- name := fields[0]
- value, err := strconv.Atoi(fields[1])
- if err != nil {
- return XfrmStat{}, err
- }
-
- switch name {
- case "XfrmInError":
- x.XfrmInError = value
- case "XfrmInBufferError":
- x.XfrmInBufferError = value
- case "XfrmInHdrError":
- x.XfrmInHdrError = value
- case "XfrmInNoStates":
- x.XfrmInNoStates = value
- case "XfrmInStateProtoError":
- x.XfrmInStateProtoError = value
- case "XfrmInStateModeError":
- x.XfrmInStateModeError = value
- case "XfrmInStateSeqError":
- x.XfrmInStateSeqError = value
- case "XfrmInStateExpired":
- x.XfrmInStateExpired = value
- case "XfrmInStateInvalid":
- x.XfrmInStateInvalid = value
- case "XfrmInTmplMismatch":
- x.XfrmInTmplMismatch = value
- case "XfrmInNoPols":
- x.XfrmInNoPols = value
- case "XfrmInPolBlock":
- x.XfrmInPolBlock = value
- case "XfrmInPolError":
- x.XfrmInPolError = value
- case "XfrmOutError":
- x.XfrmOutError = value
- case "XfrmInStateMismatch":
- x.XfrmInStateMismatch = value
- case "XfrmOutBundleGenError":
- x.XfrmOutBundleGenError = value
- case "XfrmOutBundleCheckError":
- x.XfrmOutBundleCheckError = value
- case "XfrmOutNoStates":
- x.XfrmOutNoStates = value
- case "XfrmOutStateProtoError":
- x.XfrmOutStateProtoError = value
- case "XfrmOutStateModeError":
- x.XfrmOutStateModeError = value
- case "XfrmOutStateSeqError":
- x.XfrmOutStateSeqError = value
- case "XfrmOutStateExpired":
- x.XfrmOutStateExpired = value
- case "XfrmOutPolBlock":
- x.XfrmOutPolBlock = value
- case "XfrmOutPolDead":
- x.XfrmOutPolDead = value
- case "XfrmOutPolError":
- x.XfrmOutPolError = value
- case "XfrmFwdHdrError":
- x.XfrmFwdHdrError = value
- case "XfrmOutStateInvalid":
- x.XfrmOutStateInvalid = value
- case "XfrmAcquireError":
- x.XfrmAcquireError = value
- }
-
- }
-
- return x, s.Err()
-}
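
For reference, the removed xfrm API in use; `NewXfrmStat` opens the default `/proc` mount itself, as shown above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewXfrmStat reads /proc/net/xfrm_stat via the default mount point.
	x, err := procfs.NewXfrmStat()
	if err != nil {
		log.Fatal(err)
	}

	// A non-zero XfrmInNoStates typically means inbound IPsec packets
	// arrived for which no matching SA was found.
	fmt.Printf("XfrmInError=%d XfrmInNoStates=%d XfrmOutError=%d\n",
		x.XfrmInError, x.XfrmInNoStates, x.XfrmOutError)
}
```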
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
deleted file mode 100644
index 742dff453b..0000000000
--- a/vendor/github.com/prometheus/procfs/netstat.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-)
-
-// NetStat contains statistics for all the counters from one file.
-type NetStat struct {
- Stats map[string][]uint64
- Filename string
-}
-
-// NetStat retrieves stats from `/proc/net/stat/`.
-func (fs FS) NetStat() ([]NetStat, error) {
- statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
- if err != nil {
- return nil, err
- }
-
- var netStatsTotal []NetStat
-
- for _, filePath := range statFiles {
- procNetstat, err := parseNetstat(filePath)
- if err != nil {
- return nil, err
- }
- procNetstat.Filename = filepath.Base(filePath)
-
- netStatsTotal = append(netStatsTotal, procNetstat)
- }
- return netStatsTotal, nil
-}
-
-// parseNetstat parses the metrics from a file under `/proc/net/stat/`
-// and returns a NetStat structure.
-func parseNetstat(filePath string) (NetStat, error) {
- netStat := NetStat{
- Stats: make(map[string][]uint64),
- }
- file, err := os.Open(filePath)
- if err != nil {
- return netStat, err
- }
- defer file.Close()
-
- scanner := bufio.NewScanner(file)
- scanner.Scan()
-
-	// The first line is a header naming each counter
- var headers []string
- headers = append(headers, strings.Fields(scanner.Text())...)
-
-	// Each remaining line holds one CPU's values for those counters
- for scanner.Scan() {
- for num, counter := range strings.Fields(scanner.Text()) {
- value, err := strconv.ParseUint(counter, 16, 64)
- if err != nil {
- return NetStat{}, err
- }
- netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
- }
- }
-
- return netStat, nil
-}
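
A sketch of consuming `fs.NetStat()`; each header maps to one value per CPU, so a host-wide figure means summing the slice:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	stats, err := fs.NetStat()
	if err != nil {
		log.Fatal(err)
	}

	// Each Stats entry holds one counter value per CPU; sum for a total.
	for _, ns := range stats {
		for header, perCPU := range ns.Stats {
			var total uint64
			for _, v := range perCPU {
				total += v
			}
			fmt.Printf("%s %s = %d\n", ns.Filename, header, total)
		}
	}
}
```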
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
deleted file mode 100644
index 0e8c4fa0b0..0000000000
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Proc provides information about a running process.
-type Proc struct {
- // The process ID.
- PID int
-
- fs FS
-}
-
-// Procs represents a list of Proc structs.
-type Procs []Proc
-
-var (
- ErrFileParse = errors.New("Error Parsing File")
- ErrFileRead = errors.New("Error Reading File")
- ErrMountPoint = errors.New("Error Accessing Mount point")
-)
-
-func (p Procs) Len() int { return len(p) }
-func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
-
-// Self returns a process for the current process read via /proc/self.
-func Self() (Proc, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil || errors.Unwrap(err) == ErrMountPoint {
- return Proc{}, err
- }
- return fs.Self()
-}
-
-// NewProc returns a process for the given pid under /proc.
-func NewProc(pid int) (Proc, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Proc{}, err
- }
- return fs.Proc(pid)
-}
-
-// AllProcs returns a list of all currently available processes under /proc.
-func AllProcs() (Procs, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Procs{}, err
- }
- return fs.AllProcs()
-}
-
-// Self returns a process for the current process.
-func (fs FS) Self() (Proc, error) {
- p, err := os.Readlink(fs.proc.Path("self"))
- if err != nil {
- return Proc{}, err
- }
- pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
- if err != nil {
- return Proc{}, err
- }
- return fs.Proc(pid)
-}
-
-// NewProc returns a process for the given pid.
-//
-// Deprecated: Use fs.Proc() instead.
-func (fs FS) NewProc(pid int) (Proc, error) {
- return fs.Proc(pid)
-}
-
-// Proc returns a process for the given pid.
-func (fs FS) Proc(pid int) (Proc, error) {
- if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
- return Proc{}, err
- }
- return Proc{PID: pid, fs: fs}, nil
-}
-
-// AllProcs returns a list of all currently available processes.
-func (fs FS) AllProcs() (Procs, error) {
- d, err := os.Open(fs.proc.Path())
- if err != nil {
- return Procs{}, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
- }
-
- p := Procs{}
- for _, n := range names {
- pid, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- continue
- }
- p = append(p, Proc{PID: int(pid), fs: fs})
- }
-
- return p, nil
-}
-
-// CmdLine returns the command line of a process.
-func (p Proc) CmdLine() ([]string, error) {
- data, err := util.ReadFileNoStat(p.path("cmdline"))
- if err != nil {
- return nil, err
- }
-
- if len(data) < 1 {
- return []string{}, nil
- }
-
- return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
-}
-
-// Wchan returns the wchan (wait channel) of a process.
-func (p Proc) Wchan() (string, error) {
- f, err := os.Open(p.path("wchan"))
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- data, err := io.ReadAll(f)
- if err != nil {
- return "", err
- }
-
- wchan := string(data)
- if wchan == "" || wchan == "0" {
- return "", nil
- }
-
- return wchan, nil
-}
-
-// Comm returns the command name of a process.
-func (p Proc) Comm() (string, error) {
- data, err := util.ReadFileNoStat(p.path("comm"))
- if err != nil {
- return "", err
- }
-
- return strings.TrimSpace(string(data)), nil
-}
-
-// Executable returns the absolute path of the executable command of a process.
-func (p Proc) Executable() (string, error) {
- exe, err := os.Readlink(p.path("exe"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return exe, err
-}
-
-// Cwd returns the absolute path to the current working directory of the process.
-func (p Proc) Cwd() (string, error) {
- wd, err := os.Readlink(p.path("cwd"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return wd, err
-}
-
-// RootDir returns the absolute path to the process's root directory (as set by chroot).
-func (p Proc) RootDir() (string, error) {
- rdir, err := os.Readlink(p.path("root"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return rdir, err
-}
-
-// FileDescriptors returns the currently open file descriptors of a process.
-func (p Proc) FileDescriptors() ([]uintptr, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- fds := make([]uintptr, len(names))
- for i, n := range names {
- fd, err := strconv.ParseInt(n, 10, 32)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
- }
- fds[i] = uintptr(fd)
- }
-
- return fds, nil
-}
-
-// FileDescriptorTargets returns the targets of all file descriptors of a process.
-// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
-func (p Proc) FileDescriptorTargets() ([]string, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- targets := make([]string, len(names))
-
- for i, name := range names {
- target, err := os.Readlink(p.path("fd", name))
- if err == nil {
- targets[i] = target
- }
- }
-
- return targets, nil
-}
-
-// FileDescriptorsLen returns the number of currently open file descriptors of
-// a process.
-func (p Proc) FileDescriptorsLen() (int, error) {
- // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
- if p.fs.isReal {
- stat, err := os.Stat(p.path("fd"))
- if err != nil {
- return 0, err
- }
-
- size := stat.Size()
- if size > 0 {
- return int(size), nil
- }
- }
-
- fds, err := p.fileDescriptors()
- if err != nil {
- return 0, err
- }
-
- return len(fds), nil
-}
-
-// MountStats retrieves statistics and configuration for mount points in a
-// process's namespace.
-func (p Proc) MountStats() ([]*Mount, error) {
- f, err := os.Open(p.path("mountstats"))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseMountStats(f)
-}
-
-// MountInfo retrieves mount information for mount points in a
-// process's namespace.
-// It supplies information missing in `/proc/self/mounts` and
-// fixes various other problems with that file too.
-func (p Proc) MountInfo() ([]*MountInfo, error) {
- data, err := util.ReadFileNoStat(p.path("mountinfo"))
- if err != nil {
- return nil, err
- }
- return parseMountInfo(data)
-}
-
-func (p Proc) fileDescriptors() ([]string, error) {
- d, err := os.Open(p.path("fd"))
- if err != nil {
- return nil, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
- }
-
- return names, nil
-}
-
-func (p Proc) path(pa ...string) string {
- return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
-}
-
-// FileDescriptorsInfo retrieves information about all file descriptors of
-// the process.
-func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- var fdinfos ProcFDInfos
-
- for _, n := range names {
- fdinfo, err := p.FDInfo(n)
- if err != nil {
- continue
- }
- fdinfos = append(fdinfos, *fdinfo)
- }
-
- return fdinfos, nil
-}
-
-// Schedstat returns task scheduling information for the process.
-func (p Proc) Schedstat() (ProcSchedstat, error) {
- contents, err := os.ReadFile(p.path("schedstat"))
- if err != nil {
- return ProcSchedstat{}, err
- }
- return parseProcSchedstat(string(contents))
-}
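
A minimal example of the core `Proc` API removed here, using `Self()` to inspect the calling process (errors after the constructor are elided for brevity):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Self resolves /proc/self to a Proc for the current process.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	comm, _ := p.Comm()               // short command name
	cmdline, _ := p.CmdLine()         // full argv
	nfds, _ := p.FileDescriptorsLen() // uses the fast stat path on Linux 6.2+

	fmt.Printf("pid=%d comm=%q argv=%v open_fds=%d\n", p.PID, comm, cmdline, nfds)
}
```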
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
deleted file mode 100644
index daeed7f571..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
-// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
-// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
-// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
-// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
-// in this hierarchy.
-//
-// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
-type Cgroup struct {
- // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
- // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
- HierarchyID int
- // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
- // Cgroups V2 this may be empty, as all active controllers use the same hierarchy
- Controllers []string
- // Path of this control group, relative to the mount point of the cgroupfs representing this specific
- // hierarchy
- Path string
-}
-
-// parseCgroupString parses each line of the /proc/[pid]/cgroup file
-// Line format is hierarchyID:[controller1,controller2]:path.
-func parseCgroupString(cgroupStr string) (*Cgroup, error) {
- var err error
-
- fields := strings.SplitN(cgroupStr, ":", 3)
- if len(fields) < 3 {
- return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
- }
-
- cgroup := &Cgroup{
- Path: fields[2],
- Controllers: nil,
- }
- cgroup.HierarchyID, err = strconv.Atoi(fields[0])
- if err != nil {
- return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
- }
- if fields[1] != "" {
- ssNames := strings.Split(fields[1], ",")
- cgroup.Controllers = append(cgroup.Controllers, ssNames...)
- }
- return cgroup, nil
-}
-
-// parseCgroups reads each line of the /proc/[pid]/cgroup file.
-func parseCgroups(data []byte) ([]Cgroup, error) {
- var cgroups []Cgroup
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- mountString := scanner.Text()
- parsedMounts, err := parseCgroupString(mountString)
- if err != nil {
- return nil, err
- }
- cgroups = append(cgroups, *parsedMounts)
- }
-
- err := scanner.Err()
- return cgroups, err
-}
-
-// Cgroups reads from /proc/<pid>/cgroup and returns a []Cgroup struct locating this PID in each process
-// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the length of the returned slice is equal to the number of active hierarchies on this system.
-func (p Proc) Cgroups() ([]Cgroup, error) {
- data, err := util.ReadFileNoStat(p.path("cgroup"))
- if err != nil {
- return nil, err
- }
- return parseCgroups(data)
-}
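
Usage sketch for `Proc.Cgroups()`; on a cgroup-v2-only host the loop prints a single hierarchy with ID 0:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}

	for _, cg := range cgroups {
		// On pure cgroup v2 this is one line: hierarchy 0, no controllers.
		fmt.Printf("%d %v %s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}
```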
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
deleted file mode 100644
index 5dd4938999..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_cgroups.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2021 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CgroupSummary models one line from /proc/cgroups.
-// This file contains information about the controllers that are compiled into the kernel.
-//
-// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
-type CgroupSummary struct {
- // The name of the controller. controller is also known as subsystem.
- SubsysName string
- // The unique ID of the cgroup hierarchy on which this controller is mounted.
- Hierarchy int
- // The number of control groups in this hierarchy using this controller.
- Cgroups int
- // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled
- Enabled int
-}
-
-// parseCgroupSummaryString parses one line of the /proc/cgroups file.
-// Line format is `subsys_name hierarchy num_cgroups enabled`.
-func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
- var err error
-
- fields := strings.Fields(CgroupSummaryStr)
- // require at least 4 fields
- if len(fields) < 4 {
- return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
- }
-
- CgroupSummary := &CgroupSummary{
- SubsysName: fields[0],
- }
- CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
- if err != nil {
- return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
- }
- CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
- if err != nil {
- return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
- }
- CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
- if err != nil {
- return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
- }
- return CgroupSummary, nil
-}
-
-// parseCgroupSummary reads each line of the /proc/cgroup file.
-func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
- var CgroupSummarys []CgroupSummary
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- CgroupSummaryString := scanner.Text()
- // ignore comment lines
- if strings.HasPrefix(CgroupSummaryString, "#") {
- continue
- }
- CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
- if err != nil {
- return nil, err
- }
- CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
- }
-
- err := scanner.Err()
- return CgroupSummarys, err
-}
-
-// CgroupSummarys returns information about current /proc/cgroups.
-func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
- if err != nil {
- return nil, err
- }
- return parseCgroupSummary(data)
-}
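
And the corresponding `/proc/cgroups` summary API in use, filtering for controllers the kernel currently has enabled:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	summaries, err := fs.CgroupSummarys()
	if err != nil {
		log.Fatal(err)
	}

	// Enabled is 1 for active controllers, 0 for disabled ones.
	for _, s := range summaries {
		if s.Enabled == 1 {
			fmt.Printf("%s: hierarchy=%d cgroups=%d\n", s.SubsysName, s.Hierarchy, s.Cgroups)
		}
	}
}
```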
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
deleted file mode 100644
index 57a89895d6..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_environ.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Environ reads process environments from `/proc/<pid>/environ`.
-func (p Proc) Environ() ([]string, error) {
- environments := make([]string, 0)
-
- data, err := util.ReadFileNoStat(p.path("environ"))
- if err != nil {
- return environments, err
- }
-
- environments = strings.Split(string(data), "\000")
- if len(environments) > 0 {
- environments = environments[:len(environments)-1]
- }
-
- return environments, nil
-}
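
A sketch of `Proc.Environ()`; each returned entry is a KEY=VALUE pair, so indexing them takes one `strings.Cut` per entry:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	env, err := p.Environ()
	if err != nil {
		log.Fatal(err)
	}

	// Split each KEY=VALUE entry on the first '=' and index by key.
	vars := make(map[string]string, len(env))
	for _, kv := range env {
		if k, v, ok := strings.Cut(kv, "="); ok {
			vars[k] = v
		}
	}
	fmt.Printf("PATH=%s\n", vars["PATH"])
}
```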
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
deleted file mode 100644
index fa761b3529..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "regexp"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-var (
- rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
- rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
- rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
- rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
- rInotify = regexp.MustCompile(`^inotify`)
- rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
-)
-
-// ProcFDInfo represents file descriptor information.
-type ProcFDInfo struct {
- // File descriptor
- FD string
- // File offset
- Pos string
- // File access mode and status flags
- Flags string
- // Mount point ID
- MntID string
- // Inode number
- Ino string
- // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
- InotifyInfos []InotifyInfo
-}
-
-// FDInfo returns file descriptor information for the given fd. On kernels older than 3.8, InotifyInfos will always be empty.
-func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
- data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
- if err != nil {
- return nil, err
- }
-
- var text, pos, flags, mntid, ino string
- var inotify []InotifyInfo
-
- scanner := bufio.NewScanner(bytes.NewReader(data))
- for scanner.Scan() {
- text = scanner.Text()
- if rPos.MatchString(text) {
- pos = rPos.FindStringSubmatch(text)[1]
- } else if rFlags.MatchString(text) {
- flags = rFlags.FindStringSubmatch(text)[1]
- } else if rMntID.MatchString(text) {
- mntid = rMntID.FindStringSubmatch(text)[1]
- } else if rIno.MatchString(text) {
- ino = rIno.FindStringSubmatch(text)[1]
- } else if rInotify.MatchString(text) {
- newInotify, err := parseInotifyInfo(text)
- if err != nil {
- return nil, err
- }
- inotify = append(inotify, *newInotify)
- }
- }
-
- i := &ProcFDInfo{
- FD: fd,
- Pos: pos,
- Flags: flags,
- MntID: mntid,
- Ino: ino,
- InotifyInfos: inotify,
- }
-
- return i, nil
-}
-
-// InotifyInfo represents a single inotify line in the fdinfo file.
-type InotifyInfo struct {
- // Watch descriptor number
- WD string
- // Inode number
- Ino string
- // Device ID
- Sdev string
- // Mask of events being monitored
- Mask string
-}
-
-// parseInotifyInfo parses a single inotify line of the fdinfo file. Only available on kernel 3.8+.
-func parseInotifyInfo(line string) (*InotifyInfo, error) {
- m := rInotifyParts.FindStringSubmatch(line)
- if len(m) >= 4 {
- var mask string
- if len(m) == 5 {
- mask = m[4]
- }
- i := &InotifyInfo{
- WD: m[1],
- Ino: m[2],
- Sdev: m[3],
- Mask: mask,
- }
- return i, nil
- }
- return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
-}
-
-// ProcFDInfos represents a list of ProcFDInfo structs.
-type ProcFDInfos []ProcFDInfo
-
-func (p ProcFDInfos) Len() int { return len(p) }
-func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
-
-// InotifyWatchLen returns the total number of inotify watches.
-func (p ProcFDInfos) InotifyWatchLen() (int, error) {
- length := 0
- for _, f := range p {
- length += len(f.InotifyInfos)
- }
-
- return length, nil
-}
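
The fdinfo API in use; note `InotifyWatchLen` never actually fails in the implementation above, so its error can safely be ignored:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	infos, err := p.FileDescriptorsInfo()
	if err != nil {
		log.Fatal(err)
	}

	// InotifyWatchLen sums inotify watches across all file descriptors.
	watches, _ := infos.InotifyWatchLen()
	fmt.Printf("fds=%d inotify_watches=%d\n", len(infos), watches)

	for _, fi := range infos {
		fmt.Printf("fd=%s pos=%s flags=%s\n", fi.FD, fi.Pos, fi.Flags)
	}
}
```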
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
deleted file mode 100644
index 86b4b45246..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_interrupts.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Interrupt represents a single interrupt line.
-type Interrupt struct {
- // Info is the type of interrupt.
- Info string
- // Devices is the name of the device that is located at that IRQ
- Devices string
- // Values is the number of interrupts per CPU.
- Values []string
-}
-
-// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
-// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
-// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
-type Interrupts map[string]Interrupt
-
-// Interrupts creates a new instance from a given Proc instance.
-func (p Proc) Interrupts() (Interrupts, error) {
- data, err := util.ReadFileNoStat(p.path("interrupts"))
- if err != nil {
- return nil, err
- }
- return parseInterrupts(bytes.NewReader(data))
-}
-
-func parseInterrupts(r io.Reader) (Interrupts, error) {
- var (
- interrupts = Interrupts{}
- scanner = bufio.NewScanner(r)
- )
-
- if !scanner.Scan() {
- return nil, errors.New("interrupts empty")
- }
- cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
-
- for scanner.Scan() {
- parts := strings.Fields(scanner.Text())
- if len(parts) == 0 { // skip empty lines
- continue
- }
- if len(parts) < 2 {
- return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
- }
- intName := parts[0][:len(parts[0])-1] // remove trailing :
-
- if len(parts) == 2 {
- interrupts[intName] = Interrupt{
- Info: "",
- Devices: "",
- Values: []string{
- parts[1],
- },
- }
- continue
- }
-
- intr := Interrupt{
- Values: parts[1 : cpuNum+1],
- }
-
-		if _, err := strconv.Atoi(intName); err == nil { // numbered IRQ line
- intr.Info = parts[cpuNum+1]
- intr.Devices = strings.Join(parts[cpuNum+2:], " ")
- } else {
- intr.Info = strings.Join(parts[cpuNum+1:], " ")
- }
- interrupts[intName] = intr
- }
-
- return interrupts, scanner.Err()
-}
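
A sketch of `Proc.Interrupts()`; the per-CPU columns are kept as strings, so summing requires parsing each value (decimal in the interrupts format). Note the implementation above resolves the file relative to the PID directory, so this may fail on kernels that do not expose it there:

```go
package main

import (
	"fmt"
	"log"
	"strconv"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	interrupts, err := p.Interrupts()
	if err != nil {
		log.Fatal(err)
	}

	// Values holds one decimal counter per CPU column; sum them per IRQ.
	for irq, intr := range interrupts {
		var total uint64
		for _, v := range intr.Values {
			n, err := strconv.ParseUint(v, 10, 64)
			if err != nil {
				continue // skip non-numeric columns defensively
			}
			total += n
		}
		fmt.Printf("irq=%s devices=%q total=%d\n", irq, intr.Devices, total)
	}
}
```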
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
deleted file mode 100644
index 776f349717..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcIO models the content of /proc/<pid>/io.
-type ProcIO struct {
- // Chars read.
- RChar uint64
- // Chars written.
- WChar uint64
- // Read syscalls.
- SyscR uint64
- // Write syscalls.
- SyscW uint64
- // Bytes read.
- ReadBytes uint64
- // Bytes written.
- WriteBytes uint64
- // Bytes written, but taking into account truncation. See
- // Documentation/filesystems/proc.txt in the kernel sources for
- // detailed explanation.
- CancelledWriteBytes int64
-}
-
-// IO creates a new ProcIO instance from a given Proc instance.
-func (p Proc) IO() (ProcIO, error) {
- pio := ProcIO{}
-
- data, err := util.ReadFileNoStat(p.path("io"))
- if err != nil {
- return pio, err
- }
-
- ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
- "read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
-
- _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
- &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
-
- return pio, err
-}
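
`Proc.IO()` in use; reading another process's io file may require elevated privileges, but self-inspection does not:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	pio, err := p.IO()
	if err != nil {
		log.Fatal(err)
	}

	// ReadBytes/WriteBytes count actual storage I/O, while RChar/WChar
	// also include cache hits and pipe traffic.
	fmt.Printf("read_bytes=%d write_bytes=%d rchar=%d wchar=%d\n",
		pio.ReadBytes, pio.WriteBytes, pio.RChar, pio.WChar)
}
```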
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
deleted file mode 100644
index 9530b14bc6..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "regexp"
- "strconv"
-)
-
-// ProcLimits represents the soft limits for each of the process's resource
-// limits. For more information see getrlimit(2):
-// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
-type ProcLimits struct {
- // CPU time limit in seconds.
- CPUTime uint64
- // Maximum size of files that the process may create.
- FileSize uint64
- // Maximum size of the process's data segment (initialized data,
- // uninitialized data, and heap).
- DataSize uint64
- // Maximum size of the process stack in bytes.
- StackSize uint64
- // Maximum size of a core file.
- CoreFileSize uint64
- // Limit of the process's resident set in pages.
- ResidentSet uint64
- // Maximum number of processes that can be created for the real user ID of
- // the calling process.
- Processes uint64
- // Value one greater than the maximum file descriptor number that can be
- // opened by this process.
- OpenFiles uint64
- // Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory uint64
- // Maximum size of the process's virtual memory address space in bytes.
- AddressSpace uint64
- // Limit on the combined number of flock(2) locks and fcntl(2) leases that
- // this process may establish.
- FileLocks uint64
- // Limit of signals that may be queued for the real user ID of the calling
- // process.
- PendingSignals uint64
- // Limit on the number of bytes that can be allocated for POSIX message
- // queues for the real user ID of the calling process.
- MsqqueueSize uint64
- // Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority uint64
- // Limit of the real-time priority set using sched_setscheduler(2) or
- // sched_setparam(2).
- RealtimePriority uint64
- // Limit (in microseconds) on the amount of CPU time that a process
- // scheduled under a real-time scheduling policy may consume without making
- // a blocking system call.
- RealtimeTimeout uint64
-}
-
-const (
- limitsFields = 4
- limitsUnlimited = "unlimited"
-)
-
-var (
- limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
-)
-
-// NewLimits returns the current soft limits of the process.
-//
-// Deprecated: Use p.Limits() instead.
-func (p Proc) NewLimits() (ProcLimits, error) {
- return p.Limits()
-}
-
-// Limits returns the current soft limits of the process.
-func (p Proc) Limits() (ProcLimits, error) {
- f, err := os.Open(p.path("limits"))
- if err != nil {
- return ProcLimits{}, err
- }
- defer f.Close()
-
- var (
- l = ProcLimits{}
- s = bufio.NewScanner(f)
- )
-
- s.Scan() // Skip limits header
-
- for s.Scan() {
- fields := limitsMatch.FindStringSubmatch(s.Text())
- if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
- }
-
- switch fields[1] {
- case "Max cpu time":
- l.CPUTime, err = parseUint(fields[2])
- case "Max file size":
- l.FileSize, err = parseUint(fields[2])
- case "Max data size":
- l.DataSize, err = parseUint(fields[2])
- case "Max stack size":
- l.StackSize, err = parseUint(fields[2])
- case "Max core file size":
- l.CoreFileSize, err = parseUint(fields[2])
- case "Max resident set":
- l.ResidentSet, err = parseUint(fields[2])
- case "Max processes":
- l.Processes, err = parseUint(fields[2])
- case "Max open files":
- l.OpenFiles, err = parseUint(fields[2])
- case "Max locked memory":
- l.LockedMemory, err = parseUint(fields[2])
- case "Max address space":
- l.AddressSpace, err = parseUint(fields[2])
- case "Max file locks":
- l.FileLocks, err = parseUint(fields[2])
- case "Max pending signals":
- l.PendingSignals, err = parseUint(fields[2])
- case "Max msgqueue size":
- l.MsqqueueSize, err = parseUint(fields[2])
- case "Max nice priority":
- l.NicePriority, err = parseUint(fields[2])
- case "Max realtime priority":
- l.RealtimePriority, err = parseUint(fields[2])
- case "Max realtime timeout":
- l.RealtimeTimeout, err = parseUint(fields[2])
- }
- if err != nil {
- return ProcLimits{}, err
- }
- }
-
- return l, s.Err()
-}
-
-func parseUint(s string) (uint64, error) {
- if s == limitsUnlimited {
- return 18446744073709551615, nil
- }
- i, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
- }
- return i, nil
-}
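
A sketch of `Proc.Limits()`; note that `parseUint` above maps the string "unlimited" to the maximum uint64 value, which callers should special-case:

```go
package main

import (
	"fmt"
	"log"
	"math"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	limits, err := p.Limits()
	if err != nil {
		log.Fatal(err)
	}

	// "unlimited" is parsed as math.MaxUint64 by parseUint above.
	if limits.OpenFiles == math.MaxUint64 {
		fmt.Println("open files: unlimited")
	} else {
		fmt.Printf("open files: %d\n", limits.OpenFiles)
	}
}
```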
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
deleted file mode 100644
index 7e75c286b5..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
-// +build !js
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-
- "golang.org/x/sys/unix"
-)
-
-// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
-type ProcMapPermissions struct {
- // mapping has the [R]ead flag set
- Read bool
- // mapping has the [W]rite flag set
- Write bool
- // mapping has the [X]ecutable flag set
- Execute bool
- // mapping has the [S]hared flag set
- Shared bool
- // mapping is marked as [P]rivate (copy on write)
- Private bool
-}
-
-// ProcMap contains the process memory-mappings of the process
-// read from `/proc/[pid]/maps`.
-type ProcMap struct {
- // The start address of current mapping.
- StartAddr uintptr
- // The end address of the current mapping
- EndAddr uintptr
- // The permissions for this mapping
- Perms *ProcMapPermissions
- // The current offset into the file/fd (e.g., shared libs)
- Offset int64
- // Device owner of this mapping (major:minor) in Mkdev format.
- Dev uint64
- // The inode of the device above
- Inode uint64
-	// The file or pseudofile (or empty==anonymous)
- Pathname string
-}
-
-// parseDevice parses the device token of a line and converts it to a dev_t
-// (mkdev) like structure.
-func parseDevice(s string) (uint64, error) {
- i := strings.Index(s, ":")
- if i == -1 {
- return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
- }
-
- major, err := strconv.ParseUint(s[0:i], 16, 0)
- if err != nil {
- return 0, err
- }
-
- minor, err := strconv.ParseUint(s[i+1:], 16, 0)
- if err != nil {
- return 0, err
- }
-
- return unix.Mkdev(uint32(major), uint32(minor)), nil
-}
-
-// parseAddress converts a hex-string to a uintptr.
-func parseAddress(s string) (uintptr, error) {
- a, err := strconv.ParseUint(s, 16, 0)
- if err != nil {
- return 0, err
- }
-
- return uintptr(a), nil
-}
-
-// parseAddresses parses the start-end address.
-func parseAddresses(s string) (uintptr, uintptr, error) {
- idx := strings.Index(s, "-")
- if idx == -1 {
- return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
- }
-
- saddr, err := parseAddress(s[0:idx])
- if err != nil {
- return 0, 0, err
- }
-
- eaddr, err := parseAddress(s[idx+1:])
- if err != nil {
- return 0, 0, err
- }
-
- return saddr, eaddr, nil
-}
-
-// parsePermissions parses a token and returns any that are set.
-func parsePermissions(s string) (*ProcMapPermissions, error) {
- if len(s) < 4 {
- return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
- }
-
- perms := ProcMapPermissions{}
- for _, ch := range s {
- switch ch {
- case 'r':
- perms.Read = true
- case 'w':
- perms.Write = true
- case 'x':
- perms.Execute = true
- case 'p':
- perms.Private = true
- case 's':
- perms.Shared = true
- }
- }
-
- return &perms, nil
-}
-
-// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
-// buffer.
-func parseProcMap(text string) (*ProcMap, error) {
- fields := strings.Fields(text)
- if len(fields) < 5 {
- return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
- }
-
- saddr, eaddr, err := parseAddresses(fields[0])
- if err != nil {
- return nil, err
- }
-
- perms, err := parsePermissions(fields[1])
- if err != nil {
- return nil, err
- }
-
- offset, err := strconv.ParseInt(fields[2], 16, 0)
- if err != nil {
- return nil, err
- }
-
- device, err := parseDevice(fields[3])
- if err != nil {
- return nil, err
- }
-
- inode, err := strconv.ParseUint(fields[4], 10, 0)
- if err != nil {
- return nil, err
- }
-
- pathname := ""
-
- if len(fields) >= 5 {
- pathname = strings.Join(fields[5:], " ")
- }
-
- return &ProcMap{
- StartAddr: saddr,
- EndAddr: eaddr,
- Perms: perms,
- Offset: offset,
- Dev: device,
- Inode: inode,
- Pathname: pathname,
- }, nil
-}
-
-// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
-// process.
-func (p Proc) ProcMaps() ([]*ProcMap, error) {
- file, err := os.Open(p.path("maps"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- maps := []*ProcMap{}
- scan := bufio.NewScanner(file)
-
- for scan.Scan() {
- m, err := parseProcMap(scan.Text())
- if err != nil {
- return nil, err
- }
-
- maps = append(maps, m)
- }
-
- return maps, nil
-}
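
Finally, `Proc.ProcMaps()` in use on a Linux host, counting executable mappings (the binary itself plus any shared libraries):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	maps, err := p.ProcMaps()
	if err != nil {
		log.Fatal(err)
	}

	// Count mappings with the executable permission bit set.
	exec := 0
	for _, m := range maps {
		if m.Perms.Execute {
			exec++
		}
	}
	fmt.Printf("%d mappings, %d executable\n", len(maps), exec)
}
```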
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
deleted file mode 100644
index 8e3ff4d794..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ /dev/null
@@ -1,443 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcNetstat models the content of /proc/<pid>/net/netstat.
-type ProcNetstat struct {
- // The process ID.
- PID int
- TcpExt
- IpExt
-}
-
-type TcpExt struct { // nolint:revive
- SyncookiesSent *float64
- SyncookiesRecv *float64
- SyncookiesFailed *float64
- EmbryonicRsts *float64
- PruneCalled *float64
- RcvPruned *float64
- OfoPruned *float64
- OutOfWindowIcmps *float64
- LockDroppedIcmps *float64
- ArpFilter *float64
- TW *float64
- TWRecycled *float64
- TWKilled *float64
- PAWSActive *float64
- PAWSEstab *float64
- DelayedACKs *float64
- DelayedACKLocked *float64
- DelayedACKLost *float64
- ListenOverflows *float64
- ListenDrops *float64
- TCPHPHits *float64
- TCPPureAcks *float64
- TCPHPAcks *float64
- TCPRenoRecovery *float64
- TCPSackRecovery *float64
- TCPSACKReneging *float64
- TCPSACKReorder *float64
- TCPRenoReorder *float64
- TCPTSReorder *float64
- TCPFullUndo *float64
- TCPPartialUndo *float64
- TCPDSACKUndo *float64
- TCPLossUndo *float64
- TCPLostRetransmit *float64
- TCPRenoFailures *float64
- TCPSackFailures *float64
- TCPLossFailures *float64
- TCPFastRetrans *float64
- TCPSlowStartRetrans *float64
- TCPTimeouts *float64
- TCPLossProbes *float64
- TCPLossProbeRecovery *float64
- TCPRenoRecoveryFail *float64
- TCPSackRecoveryFail *float64
- TCPRcvCollapsed *float64
- TCPDSACKOldSent *float64
- TCPDSACKOfoSent *float64
- TCPDSACKRecv *float64
- TCPDSACKOfoRecv *float64
- TCPAbortOnData *float64
- TCPAbortOnClose *float64
- TCPAbortOnMemory *float64
- TCPAbortOnTimeout *float64
- TCPAbortOnLinger *float64
- TCPAbortFailed *float64
- TCPMemoryPressures *float64
- TCPMemoryPressuresChrono *float64
- TCPSACKDiscard *float64
- TCPDSACKIgnoredOld *float64
- TCPDSACKIgnoredNoUndo *float64
- TCPSpuriousRTOs *float64
- TCPMD5NotFound *float64
- TCPMD5Unexpected *float64
- TCPMD5Failure *float64
- TCPSackShifted *float64
- TCPSackMerged *float64
- TCPSackShiftFallback *float64
- TCPBacklogDrop *float64
- PFMemallocDrop *float64
- TCPMinTTLDrop *float64
- TCPDeferAcceptDrop *float64
- IPReversePathFilter *float64
- TCPTimeWaitOverflow *float64
- TCPReqQFullDoCookies *float64
- TCPReqQFullDrop *float64
- TCPRetransFail *float64
- TCPRcvCoalesce *float64
- TCPRcvQDrop *float64
- TCPOFOQueue *float64
- TCPOFODrop *float64
- TCPOFOMerge *float64
- TCPChallengeACK *float64
- TCPSYNChallenge *float64
- TCPFastOpenActive *float64
- TCPFastOpenActiveFail *float64
- TCPFastOpenPassive *float64
- TCPFastOpenPassiveFail *float64
- TCPFastOpenListenOverflow *float64
- TCPFastOpenCookieReqd *float64
- TCPFastOpenBlackhole *float64
- TCPSpuriousRtxHostQueues *float64
- BusyPollRxPackets *float64
- TCPAutoCorking *float64
- TCPFromZeroWindowAdv *float64
- TCPToZeroWindowAdv *float64
- TCPWantZeroWindowAdv *float64
- TCPSynRetrans *float64
- TCPOrigDataSent *float64
- TCPHystartTrainDetect *float64
- TCPHystartTrainCwnd *float64
- TCPHystartDelayDetect *float64
- TCPHystartDelayCwnd *float64
- TCPACKSkippedSynRecv *float64
- TCPACKSkippedPAWS *float64
- TCPACKSkippedSeq *float64
- TCPACKSkippedFinWait2 *float64
- TCPACKSkippedTimeWait *float64
- TCPACKSkippedChallenge *float64
- TCPWinProbe *float64
- TCPKeepAlive *float64
- TCPMTUPFail *float64
- TCPMTUPSuccess *float64
- TCPWqueueTooBig *float64
-}
-
-type IpExt struct { // nolint:revive
- InNoRoutes *float64
- InTruncatedPkts *float64
- InMcastPkts *float64
- OutMcastPkts *float64
- InBcastPkts *float64
- OutBcastPkts *float64
- InOctets *float64
- OutOctets *float64
- InMcastOctets *float64
- OutMcastOctets *float64
- InBcastOctets *float64
- OutBcastOctets *float64
- InCsumErrors *float64
- InNoECTPkts *float64
- InECT1Pkts *float64
- InECT0Pkts *float64
- InCEPkts *float64
- ReasmOverlaps *float64
-}
-
-func (p Proc) Netstat() (ProcNetstat, error) {
- filename := p.path("net/netstat")
- data, err := util.ReadFileNoStat(filename)
- if err != nil {
- return ProcNetstat{PID: p.PID}, err
- }
- procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
- procNetstat.PID = p.PID
- return procNetstat, err
-}
-
-// parseProcNetstat parses the metrics from the /proc/<pid>/net/netstat file
-// and returns a ProcNetstat structure.
-func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
- var (
- scanner = bufio.NewScanner(r)
- procNetstat = ProcNetstat{}
- )
-
- for scanner.Scan() {
- nameParts := strings.Split(scanner.Text(), " ")
- scanner.Scan()
- valueParts := strings.Split(scanner.Text(), " ")
- // Remove trailing :.
- protocol := strings.TrimSuffix(nameParts[0], ":")
- if len(nameParts) != len(valueParts) {
- return procNetstat, fmt.Errorf("%w: field count mismatch in %s: %s",
- ErrFileParse, fileName, protocol)
- }
- for i := 1; i < len(nameParts); i++ {
- value, err := strconv.ParseFloat(valueParts[i], 64)
- if err != nil {
- return procNetstat, err
- }
- key := nameParts[i]
-
- switch protocol {
- case "TcpExt":
- switch key {
- case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = &value
- case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = &value
- case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = &value
- case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = &value
- case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = &value
- case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = &value
- case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = &value
- case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = &value
- case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = &value
- case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = &value
- case "TW":
- procNetstat.TcpExt.TW = &value
- case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = &value
- case "TWKilled":
- procNetstat.TcpExt.TWKilled = &value
- case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = &value
- case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = &value
- case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = &value
- case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = &value
- case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = &value
- case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = &value
- case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = &value
- case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = &value
- case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = &value
- case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = &value
- case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = &value
- case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = &value
- case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = &value
- case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = &value
- case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = &value
- case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = &value
- case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = &value
- case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = &value
- case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = &value
- case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = &value
- case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = &value
- case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = &value
- case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = &value
- case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = &value
- case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = &value
- case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = &value
- case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = &value
- case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = &value
- case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = &value
- case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = &value
- case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = &value
- case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = &value
- case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = &value
- case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = &value
- case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = &value
- case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = &value
- case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = &value
- case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = &value
- case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = &value
- case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = &value
- case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = &value
- case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = &value
- case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = &value
- case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = &value
- case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = &value
- case "TCPRcvQDrop":
- procNetstat.TcpExt.TCPRcvQDrop = &value
- case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = &value
- case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = &value
- case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = &value
- case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = &value
- case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = &value
- case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = &value
- case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = &value
- case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = &value
- case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
- case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
- case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
- case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = &value
- case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
- case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = &value
- case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = &value
- case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
- case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = &value
- case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
- case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = &value
- case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = &value
- case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = &value
- case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = &value
- case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = &value
- case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = &value
- case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
- case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = &value
- case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = &value
- case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
- case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
- case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = &value
- case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = &value
- case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = &value
- case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = &value
- case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = &value
- case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = &value
- }
- case "IpExt":
- switch key {
- case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = &value
- case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = &value
- case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = &value
- case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = &value
- case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = &value
- case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = &value
- case "InOctets":
- procNetstat.IpExt.InOctets = &value
- case "OutOctets":
- procNetstat.IpExt.OutOctets = &value
- case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = &value
- case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = &value
- case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = &value
- case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = &value
- case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = &value
- case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = &value
- case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = &value
- case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = &value
- case "InCEPkts":
- procNetstat.IpExt.InCEPkts = &value
- case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = &value
- }
- }
- }
- }
- return procNetstat, scanner.Err()
-}
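For reference, the netstat parser above consumes its input as pairs of lines: a header line naming a protocol and its counters, then a value line in the same order. A minimal, self-contained sketch of that layout (the sample input is invented for illustration):

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Two-line record as found in /proc/<pid>/net/netstat: a header line
	// naming the protocol and its counters, then a matching value line.
	const sample = "TcpExt: SyncookiesSent SyncookiesRecv\nTcpExt: 3 7\n"

	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		names := strings.Split(scanner.Text(), " ")
		scanner.Scan()
		values := strings.Split(scanner.Text(), " ")

		protocol := strings.TrimSuffix(names[0], ":") // drop trailing colon
		for i := 1; i < len(names); i++ {
			v, _ := strconv.ParseFloat(values[i], 64)
			fmt.Printf("%s.%s = %v\n", protocol, names[i], v)
		}
	}
	// Output:
	// TcpExt.SyncookiesSent = 3
	// TcpExt.SyncookiesRecv = 7
}
```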
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
deleted file mode 100644
index 0f8f847f95..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// Namespace represents a single namespace of a process.
-type Namespace struct {
- Type string // Namespace type.
- Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
-}
-
-// Namespaces contains all of the namespaces that the process is contained in.
-type Namespaces map[string]Namespace
-
-// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
-// process is a member.
-func (p Proc) Namespaces() (Namespaces, error) {
- d, err := os.Open(p.path("ns"))
- if err != nil {
- return nil, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
- }
-
- ns := make(Namespaces, len(names))
- for _, name := range names {
- target, err := os.Readlink(p.path("ns", name))
- if err != nil {
- return nil, err
- }
-
- fields := strings.SplitN(target, ":", 2)
- if len(fields) != 2 {
- return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
- }
-
- typ := fields[0]
- inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
- if err != nil {
- return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
- }
-
- ns[name] = Namespace{typ, uint32(inode)}
- }
-
- return ns, nil
-}
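Each /proc/<pid>/ns/* entry is a symlink whose target encodes the namespace type and inode, which is exactly what the deleted Namespaces method splits apart. A small standalone sketch with a hard-coded example target (not read from a live system):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A /proc/<pid>/ns/* symlink target has the form "type:[inode]".
	target := "net:[4026531993]" // example value for illustration
	fields := strings.SplitN(target, ":", 2)
	typ := fields[0]
	inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("type=%s inode=%d\n", typ, inode) // type=net inode=4026531993
}
```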
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
deleted file mode 100644
index ccd35f153a..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-// The PSI / pressure interface is described at
-// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
-// Each resource (cpu, io, memory, ...) is exposed as a single file.
-// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
-// Each line contains several averages (over n seconds) and a total in µs.
-//
-// Example io pressure file:
-// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
-// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
-
-// PSILine is a single line of values as returned by `/proc/pressure/*`.
-//
-// The Avg entries are averages over n seconds, as a percentage.
-// The Total entry is in microseconds.
-type PSILine struct {
- Avg10 float64
- Avg60 float64
- Avg300 float64
- Total uint64
-}
-
-// PSIStats represent pressure stall information from /proc/pressure/*
-//
-// "Some" indicates the share of time in which at least some tasks are stalled.
-// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
-type PSIStats struct {
- Some *PSILine
- Full *PSILine
-}
-
-// PSIStatsForResource reads pressure stall information for the specified
-// resource from /proc/pressure/<resource>. At time of writing this can be
-// either "cpu", "memory" or "io".
-func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
- if err != nil {
- return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
- }
-
- return parsePSIStats(bytes.NewReader(data))
-}
-
-// parsePSIStats parses the specified file for pressure stall information.
-func parsePSIStats(r io.Reader) (PSIStats, error) {
- psiStats := PSIStats{}
-
- scanner := bufio.NewScanner(r)
- for scanner.Scan() {
- l := scanner.Text()
- prefix := strings.Split(l, " ")[0]
- switch prefix {
- case "some":
- psi := PSILine{}
- _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
- if err != nil {
- return PSIStats{}, err
- }
- psiStats.Some = &psi
- case "full":
- psi := PSILine{}
- _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
- if err != nil {
- return PSIStats{}, err
- }
- psiStats.Full = &psi
- default:
- // If we encounter a line with an unknown prefix, ignore it and move on.
- // Should new measurement types be added in the future, we'll simply ignore
- // them instead of erroring on retrieval.
- continue
- }
- }
-
- return psiStats, nil
-}
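Both branches of parsePSIStats lean on a single Sscanf format string. A self-contained sketch of that parse, reusing the lineFormat constant from the deleted file on a sample line taken from its own doc comment:

```go
package main

import "fmt"

// Same format string as the deleted proc_psi.go.
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"

func main() {
	// One "some" line as it appears in /proc/pressure/io.
	line := "some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362"

	var avg10, avg60, avg300 float64
	var total uint64
	if _, err := fmt.Sscanf(line, "some "+lineFormat,
		&avg10, &avg60, &avg300, &total); err != nil {
		panic(err)
	}
	fmt.Println(avg10, avg60, avg300, total) // 0.06 0.21 0.99 8537362
}
```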
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
deleted file mode 100644
index ad8785a407..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "fmt"
- "os"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-var (
- // match the header line before each mapped zone in `/proc/pid/smaps`.
- procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
-)
-
-type ProcSMapsRollup struct {
- // Amount of the mapping that is currently resident in RAM.
- Rss uint64
- // Process's proportional share of this mapping.
- Pss uint64
- // Size in bytes of clean shared pages.
- SharedClean uint64
- // Size in bytes of dirty shared pages.
- SharedDirty uint64
- // Size in bytes of clean private pages.
- PrivateClean uint64
- // Size in bytes of dirty private pages.
- PrivateDirty uint64
- // Amount of memory currently marked as referenced or accessed.
- Referenced uint64
- // Amount of memory that does not belong to any file.
- Anonymous uint64
- // Amount of would-be-anonymous memory currently on swap.
- Swap uint64
- // Process's proportional memory on swap.
- SwapPss uint64
-}
-
-// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
-// process.
-//
-// If smaps_rollup does not exist (requires kernel >= 4.15), the content of
-// /proc/[pid]/smaps will be read and summed instead.
-func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
- data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
- if err != nil && os.IsNotExist(err) {
- return p.procSMapsRollupManual()
- }
- if err != nil {
- return ProcSMapsRollup{}, err
- }
-
- lines := strings.Split(string(data), "\n")
- smaps := ProcSMapsRollup{}
-
- // Skip the first line, which doesn't contain the information we need.
- lines = lines[1:]
- for _, line := range lines {
- if line == "" {
- continue
- }
-
- if err := smaps.parseLine(line); err != nil {
- return ProcSMapsRollup{}, err
- }
- }
-
- return smaps, nil
-}
-
-// Read /proc/pid/smaps and do the roll-up in Go code.
-func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
- file, err := os.Open(p.path("smaps"))
- if err != nil {
- return ProcSMapsRollup{}, err
- }
- defer file.Close()
-
- smaps := ProcSMapsRollup{}
- scan := bufio.NewScanner(file)
-
- for scan.Scan() {
- line := scan.Text()
-
- if procSMapsHeaderLine.MatchString(line) {
- continue
- }
-
- if err := smaps.parseLine(line); err != nil {
- return ProcSMapsRollup{}, err
- }
- }
-
- return smaps, nil
-}
-
-func (s *ProcSMapsRollup) parseLine(line string) error {
- kv := strings.SplitN(line, ":", 2)
- if len(kv) != 2 {
- return fmt.Errorf("invalid smaps line %q: missing colon", line)
- }
-
- k := kv[0]
- if k == "VmFlags" {
- return nil
- }
-
- v := strings.TrimSpace(kv[1])
- v = strings.TrimRight(v, " kB")
-
- vKBytes, err := strconv.ParseUint(v, 10, 64)
- if err != nil {
- return err
- }
- vBytes := vKBytes * 1024
-
- s.addValue(k, vBytes)
-
- return nil
-}
-
-func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
- switch k {
- case "Rss":
- s.Rss += vUintBytes
- case "Pss":
- s.Pss += vUintBytes
- case "Shared_Clean":
- s.SharedClean += vUintBytes
- case "Shared_Dirty":
- s.SharedDirty += vUintBytes
- case "Private_Clean":
- s.PrivateClean += vUintBytes
- case "Private_Dirty":
- s.PrivateDirty += vUintBytes
- case "Referenced":
- s.Referenced += vUintBytes
- case "Anonymous":
- s.Anonymous += vUintBytes
- case "Swap":
- s.Swap += vUintBytes
- case "SwapPss":
- s.SwapPss += vUintBytes
- }
-}
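The per-line logic of the rollup parser reduces to splitting on the first colon and converting a "kB"-suffixed value to bytes. A standalone sketch of the same steps (the helper name parseKBLine is ours, not the library's):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseKBLine converts one "Key: value kB" smaps line to bytes,
// mirroring the logic of the deleted parseLine method.
func parseKBLine(line string) (key string, bytes uint64, err error) {
	kv := strings.SplitN(line, ":", 2)
	if len(kv) != 2 {
		return "", 0, fmt.Errorf("invalid smaps line %q: missing colon", line)
	}
	v := strings.TrimSpace(kv[1])
	v = strings.TrimRight(v, " kB") // values are numeric, so this is safe
	kb, err := strconv.ParseUint(v, 10, 64)
	if err != nil {
		return "", 0, err
	}
	return kv[0], kb * 1024, nil
}

func main() {
	k, b, err := parseKBLine("Rss:    4096 kB")
	if err != nil {
		panic(err)
	}
	fmt.Println(k, b) // Rss 4194304
}
```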
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
deleted file mode 100644
index b9d2cf642a..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcSnmp models the content of /proc/<pid>/net/snmp.
-type ProcSnmp struct {
- // The process ID.
- PID int
- Ip
- Icmp
- IcmpMsg
- Tcp
- Udp
- UdpLite
-}
-
-type Ip struct { // nolint:revive
- Forwarding *float64
- DefaultTTL *float64
- InReceives *float64
- InHdrErrors *float64
- InAddrErrors *float64
- ForwDatagrams *float64
- InUnknownProtos *float64
- InDiscards *float64
- InDelivers *float64
- OutRequests *float64
- OutDiscards *float64
- OutNoRoutes *float64
- ReasmTimeout *float64
- ReasmReqds *float64
- ReasmOKs *float64
- ReasmFails *float64
- FragOKs *float64
- FragFails *float64
- FragCreates *float64
-}
-
-type Icmp struct { // nolint:revive
- InMsgs *float64
- InErrors *float64
- InCsumErrors *float64
- InDestUnreachs *float64
- InTimeExcds *float64
- InParmProbs *float64
- InSrcQuenchs *float64
- InRedirects *float64
- InEchos *float64
- InEchoReps *float64
- InTimestamps *float64
- InTimestampReps *float64
- InAddrMasks *float64
- InAddrMaskReps *float64
- OutMsgs *float64
- OutErrors *float64
- OutDestUnreachs *float64
- OutTimeExcds *float64
- OutParmProbs *float64
- OutSrcQuenchs *float64
- OutRedirects *float64
- OutEchos *float64
- OutEchoReps *float64
- OutTimestamps *float64
- OutTimestampReps *float64
- OutAddrMasks *float64
- OutAddrMaskReps *float64
-}
-
-type IcmpMsg struct {
- InType3 *float64
- OutType3 *float64
-}
-
-type Tcp struct { // nolint:revive
- RtoAlgorithm *float64
- RtoMin *float64
- RtoMax *float64
- MaxConn *float64
- ActiveOpens *float64
- PassiveOpens *float64
- AttemptFails *float64
- EstabResets *float64
- CurrEstab *float64
- InSegs *float64
- OutSegs *float64
- RetransSegs *float64
- InErrs *float64
- OutRsts *float64
- InCsumErrors *float64
-}
-
-type Udp struct { // nolint:revive
- InDatagrams *float64
- NoPorts *float64
- InErrors *float64
- OutDatagrams *float64
- RcvbufErrors *float64
- SndbufErrors *float64
- InCsumErrors *float64
- IgnoredMulti *float64
-}
-
-type UdpLite struct { // nolint:revive
- InDatagrams *float64
- NoPorts *float64
- InErrors *float64
- OutDatagrams *float64
- RcvbufErrors *float64
- SndbufErrors *float64
- InCsumErrors *float64
- IgnoredMulti *float64
-}
-
-func (p Proc) Snmp() (ProcSnmp, error) {
- filename := p.path("net/snmp")
- data, err := util.ReadFileNoStat(filename)
- if err != nil {
- return ProcSnmp{PID: p.PID}, err
- }
- procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
- procSnmp.PID = p.PID
- return procSnmp, err
-}
-
-// parseSnmp parses the metrics from the /proc/<pid>/net/snmp file
-// and returns a ProcSnmp structure containing those metrics
-// (e.g. procSnmp.Ip.Forwarding = 2).
-func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
- var (
- scanner = bufio.NewScanner(r)
- procSnmp = ProcSnmp{}
- )
-
- for scanner.Scan() {
- nameParts := strings.Split(scanner.Text(), " ")
- scanner.Scan()
- valueParts := strings.Split(scanner.Text(), " ")
- // Remove trailing :.
- protocol := strings.TrimSuffix(nameParts[0], ":")
- if len(nameParts) != len(valueParts) {
- return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s",
- ErrFileParse, fileName, protocol)
- }
- for i := 1; i < len(nameParts); i++ {
- value, err := strconv.ParseFloat(valueParts[i], 64)
- if err != nil {
- return procSnmp, err
- }
- key := nameParts[i]
-
- switch protocol {
- case "Ip":
- switch key {
- case "Forwarding":
- procSnmp.Ip.Forwarding = &value
- case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = &value
- case "InReceives":
- procSnmp.Ip.InReceives = &value
- case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = &value
- case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = &value
- case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = &value
- case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = &value
- case "InDiscards":
- procSnmp.Ip.InDiscards = &value
- case "InDelivers":
- procSnmp.Ip.InDelivers = &value
- case "OutRequests":
- procSnmp.Ip.OutRequests = &value
- case "OutDiscards":
- procSnmp.Ip.OutDiscards = &value
- case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = &value
- case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = &value
- case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = &value
- case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = &value
- case "ReasmFails":
- procSnmp.Ip.ReasmFails = &value
- case "FragOKs":
- procSnmp.Ip.FragOKs = &value
- case "FragFails":
- procSnmp.Ip.FragFails = &value
- case "FragCreates":
- procSnmp.Ip.FragCreates = &value
- }
- case "Icmp":
- switch key {
- case "InMsgs":
- procSnmp.Icmp.InMsgs = &value
- case "InErrors":
- procSnmp.Icmp.InErrors = &value
- case "InCsumErrors":
- procSnmp.Icmp.InCsumErrors = &value
- case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = &value
- case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = &value
- case "InParmProbs":
- procSnmp.Icmp.InParmProbs = &value
- case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = &value
- case "InRedirects":
- procSnmp.Icmp.InRedirects = &value
- case "InEchos":
- procSnmp.Icmp.InEchos = &value
- case "InEchoReps":
- procSnmp.Icmp.InEchoReps = &value
- case "InTimestamps":
- procSnmp.Icmp.InTimestamps = &value
- case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = &value
- case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = &value
- case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = &value
- case "OutMsgs":
- procSnmp.Icmp.OutMsgs = &value
- case "OutErrors":
- procSnmp.Icmp.OutErrors = &value
- case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = &value
- case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = &value
- case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = &value
- case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = &value
- case "OutRedirects":
- procSnmp.Icmp.OutRedirects = &value
- case "OutEchos":
- procSnmp.Icmp.OutEchos = &value
- case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = &value
- case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = &value
- case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = &value
- case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = &value
- case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = &value
- }
- case "IcmpMsg":
- switch key {
- case "InType3":
- procSnmp.IcmpMsg.InType3 = &value
- case "OutType3":
- procSnmp.IcmpMsg.OutType3 = &value
- }
- case "Tcp":
- switch key {
- case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = &value
- case "RtoMin":
- procSnmp.Tcp.RtoMin = &value
- case "RtoMax":
- procSnmp.Tcp.RtoMax = &value
- case "MaxConn":
- procSnmp.Tcp.MaxConn = &value
- case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = &value
- case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = &value
- case "AttemptFails":
- procSnmp.Tcp.AttemptFails = &value
- case "EstabResets":
- procSnmp.Tcp.EstabResets = &value
- case "CurrEstab":
- procSnmp.Tcp.CurrEstab = &value
- case "InSegs":
- procSnmp.Tcp.InSegs = &value
- case "OutSegs":
- procSnmp.Tcp.OutSegs = &value
- case "RetransSegs":
- procSnmp.Tcp.RetransSegs = &value
- case "InErrs":
- procSnmp.Tcp.InErrs = &value
- case "OutRsts":
- procSnmp.Tcp.OutRsts = &value
- case "InCsumErrors":
- procSnmp.Tcp.InCsumErrors = &value
- }
- case "Udp":
- switch key {
- case "InDatagrams":
- procSnmp.Udp.InDatagrams = &value
- case "NoPorts":
- procSnmp.Udp.NoPorts = &value
- case "InErrors":
- procSnmp.Udp.InErrors = &value
- case "OutDatagrams":
- procSnmp.Udp.OutDatagrams = &value
- case "RcvbufErrors":
- procSnmp.Udp.RcvbufErrors = &value
- case "SndbufErrors":
- procSnmp.Udp.SndbufErrors = &value
- case "InCsumErrors":
- procSnmp.Udp.InCsumErrors = &value
- case "IgnoredMulti":
- procSnmp.Udp.IgnoredMulti = &value
- }
- case "UdpLite":
- switch key {
- case "InDatagrams":
- procSnmp.UdpLite.InDatagrams = &value
- case "NoPorts":
- procSnmp.UdpLite.NoPorts = &value
- case "InErrors":
- procSnmp.UdpLite.InErrors = &value
- case "OutDatagrams":
- procSnmp.UdpLite.OutDatagrams = &value
- case "RcvbufErrors":
- procSnmp.UdpLite.RcvbufErrors = &value
- case "SndbufErrors":
- procSnmp.UdpLite.SndbufErrors = &value
- case "InCsumErrors":
- procSnmp.UdpLite.InCsumErrors = &value
- case "IgnoredMulti":
- procSnmp.UdpLite.IgnoredMulti = &value
- }
- }
- }
- }
- return procSnmp, scanner.Err()
-}
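For reference, a typical consumer of the removed Snmp accessor looked roughly like this sketch; it assumes the github.com/prometheus/procfs module remains importable from upstream and runs only where /proc is mounted:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // uses /proc; Linux only
	if err != nil {
		panic(err)
	}
	p, err := fs.Self()
	if err != nil {
		panic(err)
	}
	snmp, err := p.Snmp()
	if err != nil {
		panic(err)
	}
	// Counters are *float64 so fields absent from the kernel stay nil.
	if snmp.Ip.Forwarding != nil {
		fmt.Println("Ip.Forwarding =", *snmp.Ip.Forwarding)
	}
}
```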
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
deleted file mode 100644
index 3059cc6a13..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
- "os"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
-type ProcSnmp6 struct {
- // The process ID.
- PID int
- Ip6
- Icmp6
- Udp6
- UdpLite6
-}
-
-type Ip6 struct { // nolint:revive
- InReceives *float64
- InHdrErrors *float64
- InTooBigErrors *float64
- InNoRoutes *float64
- InAddrErrors *float64
- InUnknownProtos *float64
- InTruncatedPkts *float64
- InDiscards *float64
- InDelivers *float64
- OutForwDatagrams *float64
- OutRequests *float64
- OutDiscards *float64
- OutNoRoutes *float64
- ReasmTimeout *float64
- ReasmReqds *float64
- ReasmOKs *float64
- ReasmFails *float64
- FragOKs *float64
- FragFails *float64
- FragCreates *float64
- InMcastPkts *float64
- OutMcastPkts *float64
- InOctets *float64
- OutOctets *float64
- InMcastOctets *float64
- OutMcastOctets *float64
- InBcastOctets *float64
- OutBcastOctets *float64
- InNoECTPkts *float64
- InECT1Pkts *float64
- InECT0Pkts *float64
- InCEPkts *float64
-}
-
-type Icmp6 struct {
- InMsgs *float64
- InErrors *float64
- OutMsgs *float64
- OutErrors *float64
- InCsumErrors *float64
- InDestUnreachs *float64
- InPktTooBigs *float64
- InTimeExcds *float64
- InParmProblems *float64
- InEchos *float64
- InEchoReplies *float64
- InGroupMembQueries *float64
- InGroupMembResponses *float64
- InGroupMembReductions *float64
- InRouterSolicits *float64
- InRouterAdvertisements *float64
- InNeighborSolicits *float64
- InNeighborAdvertisements *float64
- InRedirects *float64
- InMLDv2Reports *float64
- OutDestUnreachs *float64
- OutPktTooBigs *float64
- OutTimeExcds *float64
- OutParmProblems *float64
- OutEchos *float64
- OutEchoReplies *float64
- OutGroupMembQueries *float64
- OutGroupMembResponses *float64
- OutGroupMembReductions *float64
- OutRouterSolicits *float64
- OutRouterAdvertisements *float64
- OutNeighborSolicits *float64
- OutNeighborAdvertisements *float64
- OutRedirects *float64
- OutMLDv2Reports *float64
- InType1 *float64
- InType134 *float64
- InType135 *float64
- InType136 *float64
- InType143 *float64
- OutType133 *float64
- OutType135 *float64
- OutType136 *float64
- OutType143 *float64
-}
-
-type Udp6 struct { // nolint:revive
- InDatagrams *float64
- NoPorts *float64
- InErrors *float64
- OutDatagrams *float64
- RcvbufErrors *float64
- SndbufErrors *float64
- InCsumErrors *float64
- IgnoredMulti *float64
-}
-
-type UdpLite6 struct { // nolint:revive
- InDatagrams *float64
- NoPorts *float64
- InErrors *float64
- OutDatagrams *float64
- RcvbufErrors *float64
- SndbufErrors *float64
- InCsumErrors *float64
-}
-
-func (p Proc) Snmp6() (ProcSnmp6, error) {
- filename := p.path("net/snmp6")
- data, err := util.ReadFileNoStat(filename)
- if err != nil {
- // On systems with IPv6 disabled, this file won't exist.
- // Do nothing.
- if errors.Is(err, os.ErrNotExist) {
- return ProcSnmp6{PID: p.PID}, nil
- }
-
- return ProcSnmp6{PID: p.PID}, err
- }
-
- procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
- procSnmp6.PID = p.PID
- return procSnmp6, err
-}
-
-// parseSNMP6Stats parses the metrics from the /proc/<pid>/net/snmp6 file
-// and returns a ProcSnmp6 structure containing those metrics.
-func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
- var (
- scanner = bufio.NewScanner(r)
- procSnmp6 = ProcSnmp6{}
- )
-
- for scanner.Scan() {
- stat := strings.Fields(scanner.Text())
- if len(stat) < 2 {
- continue
- }
- // Expect to have "6" in metric name, skip line otherwise
- if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
- protocol := stat[0][:sixIndex+1]
- key := stat[0][sixIndex+1:]
- value, err := strconv.ParseFloat(stat[1], 64)
- if err != nil {
- return procSnmp6, err
- }
-
- switch protocol {
- case "Ip6":
- switch key {
- case "InReceives":
- procSnmp6.Ip6.InReceives = &value
- case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = &value
- case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = &value
- case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = &value
- case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = &value
- case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = &value
- case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = &value
- case "InDiscards":
- procSnmp6.Ip6.InDiscards = &value
- case "InDelivers":
- procSnmp6.Ip6.InDelivers = &value
- case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = &value
- case "OutRequests":
- procSnmp6.Ip6.OutRequests = &value
- case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = &value
- case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = &value
- case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = &value
- case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = &value
- case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = &value
- case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = &value
- case "FragOKs":
- procSnmp6.Ip6.FragOKs = &value
- case "FragFails":
- procSnmp6.Ip6.FragFails = &value
- case "FragCreates":
- procSnmp6.Ip6.FragCreates = &value
- case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = &value
- case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = &value
- case "InOctets":
- procSnmp6.Ip6.InOctets = &value
- case "OutOctets":
- procSnmp6.Ip6.OutOctets = &value
- case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = &value
- case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = &value
- case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = &value
- case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = &value
- case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = &value
- case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = &value
- case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = &value
- case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = &value
- }
- case "Icmp6":
- switch key {
- case "InMsgs":
- procSnmp6.Icmp6.InMsgs = &value
- case "InErrors":
- procSnmp6.Icmp6.InErrors = &value
- case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = &value
- case "OutErrors":
- procSnmp6.Icmp6.OutErrors = &value
- case "InCsumErrors":
- procSnmp6.Icmp6.InCsumErrors = &value
- case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = &value
- case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = &value
- case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = &value
- case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = &value
- case "InEchos":
- procSnmp6.Icmp6.InEchos = &value
- case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = &value
- case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = &value
- case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = &value
- case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = &value
- case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = &value
- case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = &value
- case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = &value
- case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = &value
- case "InRedirects":
- procSnmp6.Icmp6.InRedirects = &value
- case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = &value
- case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = &value
- case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = &value
- case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = &value
- case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = &value
- case "OutEchos":
- procSnmp6.Icmp6.OutEchos = &value
- case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = &value
- case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = &value
- case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = &value
- case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = &value
- case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = &value
- case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = &value
- case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = &value
- case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = &value
- case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = &value
- case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = &value
- case "InType1":
- procSnmp6.Icmp6.InType1 = &value
- case "InType134":
- procSnmp6.Icmp6.InType134 = &value
- case "InType135":
- procSnmp6.Icmp6.InType135 = &value
- case "InType136":
- procSnmp6.Icmp6.InType136 = &value
- case "InType143":
- procSnmp6.Icmp6.InType143 = &value
- case "OutType133":
- procSnmp6.Icmp6.OutType133 = &value
- case "OutType135":
- procSnmp6.Icmp6.OutType135 = &value
- case "OutType136":
- procSnmp6.Icmp6.OutType136 = &value
- case "OutType143":
- procSnmp6.Icmp6.OutType143 = &value
- }
- case "Udp6":
- switch key {
- case "InDatagrams":
- procSnmp6.Udp6.InDatagrams = &value
- case "NoPorts":
- procSnmp6.Udp6.NoPorts = &value
- case "InErrors":
- procSnmp6.Udp6.InErrors = &value
- case "OutDatagrams":
- procSnmp6.Udp6.OutDatagrams = &value
- case "RcvbufErrors":
- procSnmp6.Udp6.RcvbufErrors = &value
- case "SndbufErrors":
- procSnmp6.Udp6.SndbufErrors = &value
- case "InCsumErrors":
- procSnmp6.Udp6.InCsumErrors = &value
- case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = &value
- }
- case "UdpLite6":
- switch key {
- case "InDatagrams":
- procSnmp6.UdpLite6.InDatagrams = &value
- case "NoPorts":
- procSnmp6.UdpLite6.NoPorts = &value
- case "InErrors":
- procSnmp6.UdpLite6.InErrors = &value
- case "OutDatagrams":
- procSnmp6.UdpLite6.OutDatagrams = &value
- case "RcvbufErrors":
- procSnmp6.UdpLite6.RcvbufErrors = &value
- case "SndbufErrors":
- procSnmp6.UdpLite6.SndbufErrors = &value
- case "InCsumErrors":
- procSnmp6.UdpLite6.InCsumErrors = &value
- }
- }
- }
- }
- return procSnmp6, scanner.Err()
-}
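Unlike the snmp parser, snmp6 lines are flat name/value pairs, and parseSNMP6Stats derives the protocol prefix from the position of the first '6' in the metric name. A one-screen sketch of that split:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// snmp6 records look like "Icmp6InEchoReplies 42": the protocol prefix
	// ends at the first '6' in the metric name.
	stat := "Icmp6InEchoReplies"
	if i := strings.Index(stat, "6"); i != -1 {
		protocol := stat[:i+1] // "Icmp6"
		key := stat[i+1:]      // "InEchoReplies"
		fmt.Println(protocol, key)
	}
}
```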
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
deleted file mode 100644
index 06a8d931c9..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "os"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
-// which required cgo. However, that caused a lot of problems regarding
-// cross-compilation. Alternatives such as running a binary to determine the
-// value, or trying to derive it in some other way were all problematic. After
-// much research it was determined that USER_HZ is actually hardcoded to 100 on
-// all Go-supported platforms as of the time of this writing. This is why we
-// decided to hardcode it here as well. It is not impossible that there could
-// be systems with exceptions, but they should be very exotic edge cases, and
-// in that case, the worst outcome will be two misreported metrics.
-//
-// See also the following discussions:
-//
-// - https://github.com/prometheus/node_exporter/issues/52
-// - https://github.com/prometheus/procfs/pull/2
-// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
-const userHZ = 100
-
-// ProcStat provides status information about the process,
-// read from /proc/[pid]/stat.
-type ProcStat struct {
- // The process ID.
- PID int
- // The filename of the executable.
- Comm string
- // The process state.
- State string
- // The PID of the parent of this process.
- PPID int
- // The process group ID of the process.
- PGRP int
- // The session ID of the process.
- Session int
- // The controlling terminal of the process.
- TTY int
- // The ID of the foreground process group of the controlling terminal of
- // the process.
- TPGID int
- // The kernel flags word of the process.
- Flags uint
- // The number of minor faults the process has made which have not required
- // loading a memory page from disk.
- MinFlt uint
- // The number of minor faults that the process's waited-for children have
- // made.
- CMinFlt uint
- // The number of major faults the process has made which have required
- // loading a memory page from disk.
- MajFlt uint
- // The number of major faults that the process's waited-for children have
- // made.
- CMajFlt uint
- // Amount of time that this process has been scheduled in user mode,
- // measured in clock ticks.
- UTime uint
- // Amount of time that this process has been scheduled in kernel mode,
- // measured in clock ticks.
- STime uint
- // Amount of time that this process's waited-for children have been
- // scheduled in user mode, measured in clock ticks.
- CUTime int
- // Amount of time that this process's waited-for children have been
- // scheduled in kernel mode, measured in clock ticks.
- CSTime int
- // For processes running a real-time scheduling policy, this is the negated
- // scheduling priority, minus one.
- Priority int
- // The nice value, a value in the range 19 (low priority) to -20 (high
- // priority).
- Nice int
- // Number of threads in this process.
- NumThreads int
- // The time the process started after system boot; the value is expressed
- // in clock ticks.
- Starttime uint64
- // Virtual memory size in bytes.
- VSize uint
- // Resident set size in pages.
- RSS int
- // Soft limit in bytes on the rss of the process.
- RSSLimit uint64
- // CPU number last executed on.
- Processor uint
- // Real-time scheduling priority, a number in the range 1 to 99 for processes
- // scheduled under a real-time policy, or 0, for non-real-time processes.
- RTPriority uint
- // Scheduling policy.
- Policy uint
- // Aggregated block I/O delays, measured in clock ticks (centiseconds).
- DelayAcctBlkIOTicks uint64
- // Guest time of the process (time spent running a virtual CPU for a guest
- // operating system), measured in clock ticks.
- GuestTime int
- // Guest time of the process's children, measured in clock ticks.
- CGuestTime int
-
- proc FS
-}
-
-// NewStat returns the current status information of the process.
-//
-// Deprecated: Use p.Stat() instead.
-func (p Proc) NewStat() (ProcStat, error) {
- return p.Stat()
-}
-
-// Stat returns the current status information of the process.
-func (p Proc) Stat() (ProcStat, error) {
- data, err := util.ReadFileNoStat(p.path("stat"))
- if err != nil {
- return ProcStat{}, err
- }
-
- var (
- ignoreInt64 int64
- ignoreUint64 uint64
-
- s = ProcStat{PID: p.PID, proc: p.fs}
- l = bytes.Index(data, []byte("("))
- r = bytes.LastIndex(data, []byte(")"))
- )
-
- if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
- }
-
- s.Comm = string(data[l+1 : r])
-
- // Check the following resources for the details about the particular stat
- // fields and their data types:
- // * https://man7.org/linux/man-pages/man5/proc.5.html
- // * https://man7.org/linux/man-pages/man3/scanf.3.html
- _, err = fmt.Fscan(
- bytes.NewBuffer(data[r+2:]),
- &s.State,
- &s.PPID,
- &s.PGRP,
- &s.Session,
- &s.TTY,
- &s.TPGID,
- &s.Flags,
- &s.MinFlt,
- &s.CMinFlt,
- &s.MajFlt,
- &s.CMajFlt,
- &s.UTime,
- &s.STime,
- &s.CUTime,
- &s.CSTime,
- &s.Priority,
- &s.Nice,
- &s.NumThreads,
- &ignoreInt64,
- &s.Starttime,
- &s.VSize,
- &s.RSS,
- &s.RSSLimit,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreUint64,
- &ignoreInt64,
- &s.Processor,
- &s.RTPriority,
- &s.Policy,
- &s.DelayAcctBlkIOTicks,
- &s.GuestTime,
- &s.CGuestTime,
- )
- if err != nil {
- return ProcStat{}, err
- }
-
- return s, nil
-}
-
-// VirtualMemory returns the virtual memory size in bytes.
-func (s ProcStat) VirtualMemory() uint {
- return s.VSize
-}
-
-// ResidentMemory returns the resident memory size in bytes.
-func (s ProcStat) ResidentMemory() int {
- return s.RSS * os.Getpagesize()
-}
-
-// StartTime returns the Unix timestamp, in seconds, at which the process started.
-func (s ProcStat) StartTime() (float64, error) {
- stat, err := s.proc.Stat()
- if err != nil {
- return 0, err
- }
- return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
-}
-
-// CPUTime returns the total CPU user and system time in seconds.
-func (s ProcStat) CPUTime() float64 {
- return float64(s.UTime+s.STime) / userHZ
-}
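StartTime's conversion is just the boot time from /proc/stat plus the process's starttime field scaled by USER_HZ. A tiny sketch with hypothetical values (both numbers are invented for illustration):

```go
package main

import "fmt"

const userHZ = 100 // hardcoded on all Go-supported platforms, per the comment above

func main() {
	var (
		bootTime  uint64 = 1700000000 // hypothetical BootTime from /proc/stat
		starttime uint64 = 12345      // hypothetical clock ticks since boot
	)
	startUnix := float64(bootTime) + float64(starttime)/userHZ
	fmt.Printf("%.2f\n", startUnix) // 1700000123.45
}
```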
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
deleted file mode 100644
index a055197c63..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "math/bits"
- "sort"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// ProcStatus provides status information about the process,
-// read from /proc/[pid]/status.
-type ProcStatus struct {
- // The process ID.
- PID int
- // The process name.
- Name string
-
- // Thread group ID.
- TGID int
- // The process's PID in each of its PID namespaces.
- NSpids []uint64
-
- // Peak virtual memory size.
- VmPeak uint64 // nolint:revive
- // Virtual memory size.
- VmSize uint64 // nolint:revive
- // Locked memory size.
- VmLck uint64 // nolint:revive
- // Pinned memory size.
- VmPin uint64 // nolint:revive
- // Peak resident set size.
- VmHWM uint64 // nolint:revive
- // Resident set size (sum of RssAnon, RssFile, and RssShmem).
- VmRSS uint64 // nolint:revive
- // Size of resident anonymous memory.
- RssAnon uint64 // nolint:revive
- // Size of resident file mappings.
- RssFile uint64 // nolint:revive
- // Size of resident shared memory.
- RssShmem uint64 // nolint:revive
- // Size of data segments.
- VmData uint64 // nolint:revive
- // Size of stack segments.
- VmStk uint64 // nolint:revive
- // Size of text segments.
- VmExe uint64 // nolint:revive
- // Shared library code size.
- VmLib uint64 // nolint:revive
- // Page table entries size.
- VmPTE uint64 // nolint:revive
- // Size of second-level page tables.
- VmPMD uint64 // nolint:revive
- // Swapped-out virtual memory size by anonymous private.
- VmSwap uint64 // nolint:revive
- // Size of hugetlb memory portions
- HugetlbPages uint64
-
- // Number of voluntary context switches.
- VoluntaryCtxtSwitches uint64
- // Number of involuntary context switches.
- NonVoluntaryCtxtSwitches uint64
-
- // UIDs of the process (Real, effective, saved set, and filesystem UIDs)
- UIDs [4]uint64
- // GIDs of the process (Real, effective, saved set, and filesystem GIDs)
- GIDs [4]uint64
-
- // CpusAllowedList: list of CPU cores the process is allowed to run on.
- CpusAllowedList []uint64
-}
-
-// NewStatus returns the current status information of the process.
-func (p Proc) NewStatus() (ProcStatus, error) {
- data, err := util.ReadFileNoStat(p.path("status"))
- if err != nil {
- return ProcStatus{}, err
- }
-
- s := ProcStatus{PID: p.PID}
-
- lines := strings.Split(string(data), "\n")
- for _, line := range lines {
- if !bytes.Contains([]byte(line), []byte(":")) {
- continue
- }
-
- kv := strings.SplitN(line, ":", 2)
-
- // removes spaces
- k := strings.TrimSpace(kv[0])
- v := strings.TrimSpace(kv[1])
- // removes "kB"
- v = strings.TrimSuffix(v, " kB")
-
- // Convert the value to an integer when possible; the error check can be
- // skipped because vKBytes is not used when the value is a string.
- vKBytes, _ := strconv.ParseUint(v, 10, 64)
- // convert kB to B
- vBytes := vKBytes * 1024
-
- err = s.fillStatus(k, v, vKBytes, vBytes)
- if err != nil {
- return ProcStatus{}, err
- }
- }
-
- return s, nil
-}
-
-func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
- switch k {
- case "Tgid":
- s.TGID = int(vUint)
- case "Name":
- s.Name = vString
- case "Uid":
- var err error
- for i, v := range strings.Split(vString, "\t") {
- s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
- if err != nil {
- return err
- }
- }
- case "Gid":
- var err error
- for i, v := range strings.Split(vString, "\t") {
- s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
- if err != nil {
- return err
- }
- }
- case "NSpid":
- s.NSpids = calcNSPidsList(vString)
- case "VmPeak":
- s.VmPeak = vUintBytes
- case "VmSize":
- s.VmSize = vUintBytes
- case "VmLck":
- s.VmLck = vUintBytes
- case "VmPin":
- s.VmPin = vUintBytes
- case "VmHWM":
- s.VmHWM = vUintBytes
- case "VmRSS":
- s.VmRSS = vUintBytes
- case "RssAnon":
- s.RssAnon = vUintBytes
- case "RssFile":
- s.RssFile = vUintBytes
- case "RssShmem":
- s.RssShmem = vUintBytes
- case "VmData":
- s.VmData = vUintBytes
- case "VmStk":
- s.VmStk = vUintBytes
- case "VmExe":
- s.VmExe = vUintBytes
- case "VmLib":
- s.VmLib = vUintBytes
- case "VmPTE":
- s.VmPTE = vUintBytes
- case "VmPMD":
- s.VmPMD = vUintBytes
- case "VmSwap":
- s.VmSwap = vUintBytes
- case "HugetlbPages":
- s.HugetlbPages = vUintBytes
- case "voluntary_ctxt_switches":
- s.VoluntaryCtxtSwitches = vUint
- case "nonvoluntary_ctxt_switches":
- s.NonVoluntaryCtxtSwitches = vUint
- case "Cpus_allowed_list":
- s.CpusAllowedList = calcCpusAllowedList(vString)
- }
-
- return nil
-}
-
-// TotalCtxtSwitches returns the total number of context switches.
-func (s ProcStatus) TotalCtxtSwitches() uint64 {
- return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
-}
-
-func calcCpusAllowedList(cpuString string) []uint64 {
- s := strings.Split(cpuString, ",")
-
- var g []uint64
-
- for _, cpu := range s {
- // parse cpu ranges, example: 1-3=[1,2,3]
- if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
- startCPU, _ := strconv.ParseUint(l[0], 10, 64)
- endCPU, _ := strconv.ParseUint(l[1], 10, 64)
-
- for i := startCPU; i <= endCPU; i++ {
- g = append(g, i)
- }
- } else if len(l) == 1 {
- cpu, _ := strconv.ParseUint(l[0], 10, 64)
- g = append(g, cpu)
- }
-
- }
-
- sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
- return g
-}
-
-func calcNSPidsList(nspidsString string) []uint64 {
- s := strings.Split(nspidsString, " ")
- var nspids []uint64
-
- for _, nspid := range s {
- nspid, _ := strconv.ParseUint(nspid, 10, 64)
- if nspid == 0 {
- continue
- }
- nspids = append(nspids, nspid)
- }
-
- return nspids
-}
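calcCpusAllowedList expands comma-separated entries and dash ranges into a sorted list of CPU numbers. A standalone sketch of the same expansion (the helper name expandCPUList is ours):

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// expandCPUList mirrors calcCpusAllowedList: "0-2,4" -> [0 1 2 4].
func expandCPUList(s string) []uint64 {
	var g []uint64
	for _, cpu := range strings.Split(s, ",") {
		if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
			start, _ := strconv.ParseUint(l[0], 10, 64)
			end, _ := strconv.ParseUint(l[1], 10, 64)
			for i := start; i <= end; i++ {
				g = append(g, i)
			}
		} else {
			v, _ := strconv.ParseUint(l[0], 10, 64)
			g = append(g, v)
		}
	}
	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
	return g
}

func main() {
	fmt.Println(expandCPUList("0-2,4")) // [0 1 2 4]
}
```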
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
deleted file mode 100644
index 5eefbe2ef8..0000000000
--- a/vendor/github.com/prometheus/procfs/proc_sys.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-func sysctlToPath(sysctl string) string {
- return strings.Replace(sysctl, ".", "/", -1)
-}
-
-func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
- value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
- if err != nil {
- return nil, err
- }
- return strings.Fields(value), nil
-}
-
-func (fs FS) SysctlInts(sysctl string) ([]int, error) {
- fields, err := fs.SysctlStrings(sysctl)
- if err != nil {
- return nil, err
- }
-
- values := make([]int, len(fields))
- for i, f := range fields {
- vp := util.NewValueParser(f)
- values[i] = vp.Int()
- if err := vp.Err(); err != nil {
- return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
- }
- }
- return values, nil
-}
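sysctlToPath simply maps dotted sysctl names onto the /proc/sys hierarchy, which is what SysctlStrings and SysctlInts read from. A minimal illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// "net.ipv4.ip_forward" resolves to /proc/sys/net/ipv4/ip_forward.
	sysctl := "net.ipv4.ip_forward"
	path := "/proc/sys/" + strings.ReplaceAll(sysctl, ".", "/")
	fmt.Println(path) // /proc/sys/net/ipv4/ip_forward
}
```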
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
deleted file mode 100644
index 5f7f32dc83..0000000000
--- a/vendor/github.com/prometheus/procfs/schedstat.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "os"
- "regexp"
- "strconv"
-)
-
-var (
- cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
- procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
-)
-
-// Schedstat contains scheduler statistics from /proc/schedstat
-//
-// See
-// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
-// for a detailed description of what these numbers mean.
-//
-// Note the current kernel documentation claims some of the time units are in
-// jiffies when they are actually in nanoseconds since 2.6.23 with the
-// introduction of CFS. A fix to the documentation is pending. See
-// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
-type Schedstat struct {
- CPUs []*SchedstatCPU
-}
-
-// SchedstatCPU contains the values from one "cpu" line.
-type SchedstatCPU struct {
- CPUNum string
-
- RunningNanoseconds uint64
- WaitingNanoseconds uint64
- RunTimeslices uint64
-}
-
-// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
-type ProcSchedstat struct {
- RunningNanoseconds uint64
- WaitingNanoseconds uint64
- RunTimeslices uint64
-}
-
-// Schedstat reads data from `/proc/schedstat`.
-func (fs FS) Schedstat() (*Schedstat, error) {
- file, err := os.Open(fs.proc.Path("schedstat"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- stats := &Schedstat{}
- scanner := bufio.NewScanner(file)
-
- for scanner.Scan() {
- match := cpuLineRE.FindStringSubmatch(scanner.Text())
- if match != nil {
- cpu := &SchedstatCPU{}
- cpu.CPUNum = match[1]
-
- cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
- if err != nil {
- continue
- }
-
- cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
- if err != nil {
- continue
- }
-
- cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
- if err != nil {
- continue
- }
-
- stats.CPUs = append(stats.CPUs, cpu)
- }
- }
-
- return stats, nil
-}
-
-func parseProcSchedstat(contents string) (ProcSchedstat, error) {
- var (
- stats ProcSchedstat
- err error
- )
- match := procLineRE.FindStringSubmatch(contents)
-
- if match != nil {
- stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
- if err != nil {
- return stats, err
- }
-
- stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
- if err != nil {
- return stats, err
- }
-
- stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
- return stats, err
- }
-
- return stats, errors.New("could not parse schedstat")
-}
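
A short sketch of how the removed `Schedstat` API was typically consumed (assumes a Linux host with `/proc/schedstat`; error handling is kept minimal for brevity):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.Schedstat()
	if err != nil {
		log.Fatal(err)
	}
	// Running/waiting times are nanoseconds since kernel 2.6.23 (CFS).
	for _, cpu := range stats.CPUs {
		fmt.Printf("cpu%s: running %dns, waiting %dns\n",
			cpu.CPUNum, cpu.RunningNanoseconds, cpu.WaitingNanoseconds)
	}
}
```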
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
deleted file mode 100644
index 8611c90177..0000000000
--- a/vendor/github.com/prometheus/procfs/slab.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-var (
- slabSpace = regexp.MustCompile(`\s+`)
- slabVer = regexp.MustCompile(`slabinfo -`)
- slabHeader = regexp.MustCompile(`# name`)
-)
-
-// Slab represents a slab pool in the kernel.
-type Slab struct {
- Name string
- ObjActive int64
- ObjNum int64
- ObjSize int64
- ObjPerSlab int64
- PagesPerSlab int64
- // tunables
- Limit int64
- Batch int64
- SharedFactor int64
- SlabActive int64
- SlabNum int64
- SharedAvail int64
-}
-
-// SlabInfo represents info for all slabs.
-type SlabInfo struct {
- Slabs []*Slab
-}
-
-func shouldParseSlab(line string) bool {
- if slabVer.MatchString(line) {
- return false
- }
- if slabHeader.MatchString(line) {
- return false
- }
- return true
-}
-
-// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
-func parseV21SlabEntry(line string) (*Slab, error) {
- // First cleanup whitespace.
- l := slabSpace.ReplaceAllString(line, " ")
- s := strings.Split(l, " ")
- if len(s) != 16 {
- return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
- }
- var err error
- i := &Slab{Name: s[0]}
- i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
- if err != nil {
- return nil, err
- }
- i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
- if err != nil {
- return nil, err
- }
- i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
- if err != nil {
- return nil, err
- }
- i.Limit, err = strconv.ParseInt(s[8], 10, 64)
- if err != nil {
- return nil, err
- }
- i.Batch, err = strconv.ParseInt(s[9], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
- if err != nil {
- return nil, err
- }
- i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
- if err != nil {
- return nil, err
- }
- return i, nil
-}
-
-// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
-func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
- scanner := bufio.NewScanner(r)
- s := SlabInfo{Slabs: []*Slab{}}
- for scanner.Scan() {
- line := scanner.Text()
- if !shouldParseSlab(line) {
- continue
- }
- slab, err := parseV21SlabEntry(line)
- if err != nil {
- return s, err
- }
- s.Slabs = append(s.Slabs, slab)
- }
- return s, nil
-}
-
-// SlabInfo reads data from `/proc/slabinfo`.
-func (fs FS) SlabInfo() (SlabInfo, error) {
- // TODO: Consider passing options to allow for parsing different
- // slabinfo versions. However, slabinfo 2.1 has been stable since
- // kernel 2.6.10.
- data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
- if err != nil {
- return SlabInfo{}, err
- }
-
- return parseSlabInfo21(bytes.NewReader(data))
-}
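
A usage sketch for the removed `SlabInfo` API; note that `/proc/slabinfo` is normally readable only by root, so this is expected to fail for unprivileged users:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	info, err := fs.SlabInfo() // usually requires root
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range info.Slabs {
		fmt.Printf("%s: %d/%d objects active, objsize %d\n",
			s.Name, s.ObjActive, s.ObjNum, s.ObjSize)
	}
}
```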
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
deleted file mode 100644
index 28708e0745..0000000000
--- a/vendor/github.com/prometheus/procfs/softirqs.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Softirqs represents the softirq statistics.
-type Softirqs struct {
- Hi []uint64
- Timer []uint64
- NetTx []uint64
- NetRx []uint64
- Block []uint64
- IRQPoll []uint64
- Tasklet []uint64
- Sched []uint64
- HRTimer []uint64
- RCU []uint64
-}
-
-// Softirqs reads data from `/proc/softirqs`.
-func (fs FS) Softirqs() (Softirqs, error) {
- fileName := fs.proc.Path("softirqs")
- data, err := util.ReadFileNoStat(fileName)
- if err != nil {
- return Softirqs{}, err
- }
-
- reader := bytes.NewReader(data)
-
- return parseSoftirqs(reader)
-}
-
-func parseSoftirqs(r io.Reader) (Softirqs, error) {
- var (
- softirqs = Softirqs{}
- scanner = bufio.NewScanner(r)
- )
-
- if !scanner.Scan() {
- return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
- }
-
- for scanner.Scan() {
- parts := strings.Fields(scanner.Text())
- var err error
-
- // require at least one cpu
- if len(parts) < 2 {
- continue
- }
- switch {
- case parts[0] == "HI:":
- perCPU := parts[1:]
- softirqs.Hi = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "TIMER:":
- perCPU := parts[1:]
- softirqs.Timer = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "NET_TX:":
- perCPU := parts[1:]
- softirqs.NetTx = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "NET_RX:":
- perCPU := parts[1:]
- softirqs.NetRx = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "BLOCK:":
- perCPU := parts[1:]
- softirqs.Block = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "IRQ_POLL:":
- perCPU := parts[1:]
- softirqs.IRQPoll = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "TASKLET:":
- perCPU := parts[1:]
- softirqs.Tasklet = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "SCHED:":
- perCPU := parts[1:]
- softirqs.Sched = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "HRTIMER:":
- perCPU := parts[1:]
- softirqs.HRTimer = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "RCU:":
- perCPU := parts[1:]
- softirqs.RCU = make([]uint64, len(perCPU))
- for i, count := range perCPU {
- if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
- }
- }
- }
- }
-
- if err := scanner.Err(); err != nil {
- return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
- }
-
- return softirqs, scanner.Err()
-}
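
A minimal sketch of the removed `Softirqs` API; each struct field holds one counter per CPU, indexed by CPU number:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	s, err := fs.Softirqs()
	if err != nil {
		log.Fatal(err)
	}
	// s.Timer[i] is the TIMER softirq count observed on CPU i.
	for cpu, n := range s.Timer {
		fmt.Printf("cpu%d TIMER softirqs: %d\n", cpu, n)
	}
}
```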
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
deleted file mode 100644
index e36b41c18a..0000000000
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/fs"
- "github.com/prometheus/procfs/internal/util"
-)
-
-// CPUStat shows how much time the CPU spent in various states.
-type CPUStat struct {
- User float64
- Nice float64
- System float64
- Idle float64
- Iowait float64
- IRQ float64
- SoftIRQ float64
- Steal float64
- Guest float64
- GuestNice float64
-}
-
-// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
-// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
-// It is possible to get per-cpu stats by reading `/proc/softirqs`.
-type SoftIRQStat struct {
- Hi uint64
- Timer uint64
- NetTx uint64
- NetRx uint64
- Block uint64
- BlockIoPoll uint64
- Tasklet uint64
- Sched uint64
- Hrtimer uint64
- Rcu uint64
-}
-
-// Stat represents kernel/system statistics.
-type Stat struct {
- // Boot time in seconds since the Epoch.
- BootTime uint64
- // Summed up cpu statistics.
- CPUTotal CPUStat
- // Per-CPU statistics.
- CPU map[int64]CPUStat
- // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
- IRQTotal uint64
- // Number of times a numbered IRQ was triggered.
- IRQ []uint64
- // Number of times a context switch happened.
- ContextSwitches uint64
- // Number of times a process was created.
- ProcessCreated uint64
- // Number of processes currently running.
- ProcessesRunning uint64
- // Number of processes currently blocked (waiting for IO).
- ProcessesBlocked uint64
- // Number of times a softirq was scheduled.
- SoftIRQTotal uint64
- // Detailed softirq statistics.
- SoftIRQ SoftIRQStat
-}
-
-// parseCPUStat parses a CPU statistics line and returns the CPUStat struct plus the CPU id (or -1 for the overall sum).
-func parseCPUStat(line string) (CPUStat, int64, error) {
- cpuStat := CPUStat{}
- var cpu string
-
- count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
- &cpu,
- &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
- &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
- &cpuStat.Guest, &cpuStat.GuestNice)
-
- if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
- }
- if count == 0 {
- return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
- }
-
- cpuStat.User /= userHZ
- cpuStat.Nice /= userHZ
- cpuStat.System /= userHZ
- cpuStat.Idle /= userHZ
- cpuStat.Iowait /= userHZ
- cpuStat.IRQ /= userHZ
- cpuStat.SoftIRQ /= userHZ
- cpuStat.Steal /= userHZ
- cpuStat.Guest /= userHZ
- cpuStat.GuestNice /= userHZ
-
- if cpu == "cpu" {
- return cpuStat, -1, nil
- }
-
- cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
- if err != nil {
- return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
- }
-
- return cpuStat, cpuID, nil
-}
-
-// Parse a softirq line.
-func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
- softIRQStat := SoftIRQStat{}
- var total uint64
- var prefix string
-
- _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
- &prefix, &total,
- &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
- &softIRQStat.Block, &softIRQStat.BlockIoPoll,
- &softIRQStat.Tasklet, &softIRQStat.Sched,
- &softIRQStat.Hrtimer, &softIRQStat.Rcu)
-
- if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
- }
-
- return softIRQStat, total, nil
-}
-
-// NewStat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-//
-// Deprecated: Use fs.Stat() instead.
-func NewStat() (Stat, error) {
- fs, err := NewFS(fs.DefaultProcMountPoint)
- if err != nil {
- return Stat{}, err
- }
- return fs.Stat()
-}
-
-// NewStat returns information about current cpu/process statistics.
-// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-//
-// Deprecated: Use fs.Stat() instead.
-func (fs FS) NewStat() (Stat, error) {
- return fs.Stat()
-}
-
-// Stat returns information about current cpu/process statistics.
-// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) Stat() (Stat, error) {
- fileName := fs.proc.Path("stat")
- data, err := util.ReadFileNoStat(fileName)
- if err != nil {
- return Stat{}, err
- }
- procStat, err := parseStat(bytes.NewReader(data), fileName)
- if err != nil {
- return Stat{}, err
- }
- return procStat, nil
-}
-
-// parseStat parses the metrics from /proc/stat.
-func parseStat(r io.Reader, fileName string) (Stat, error) {
- var (
- scanner = bufio.NewScanner(r)
- stat = Stat{
- CPU: make(map[int64]CPUStat),
- }
- err error
- )
-
- // Increase default scanner buffer to handle very long `intr` lines.
- buf := make([]byte, 0, 8*1024)
- scanner.Buffer(buf, 1024*1024)
-
- for scanner.Scan() {
- line := scanner.Text()
- parts := strings.Fields(line)
- // require at least <key> <value>
- if len(parts) < 2 {
- continue
- }
- switch {
- case parts[0] == "btime":
- if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "intr":
- if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
- }
- numberedIRQs := parts[2:]
- stat.IRQ = make([]uint64, len(numberedIRQs))
- for i, count := range numberedIRQs {
- if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
- }
- }
- case parts[0] == "ctxt":
- if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "processes":
- if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "procs_running":
- if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "procs_blocked":
- if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
- }
- case parts[0] == "softirq":
- softIRQStats, total, err := parseSoftIRQStat(line)
- if err != nil {
- return Stat{}, err
- }
- stat.SoftIRQTotal = total
- stat.SoftIRQ = softIRQStats
- case strings.HasPrefix(parts[0], "cpu"):
- cpuStat, cpuID, err := parseCPUStat(line)
- if err != nil {
- return Stat{}, err
- }
- if cpuID == -1 {
- stat.CPUTotal = cpuStat
- } else {
- stat.CPU[cpuID] = cpuStat
- }
- }
- }
-
- if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
- }
-
- return stat, nil
-}
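
A sketch of the removed `Stat` API. The CPU times come back already divided by `userHZ`, so they read as seconds, and the boot time is a Unix timestamp:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	st, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("booted:", time.Unix(int64(st.BootTime), 0))
	fmt.Printf("user time: %.2fs total across %d CPUs, %d context switches\n",
		st.CPUTotal.User, len(st.CPU), st.ContextSwitches)
}
```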
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
deleted file mode 100644
index 65fec834bf..0000000000
--- a/vendor/github.com/prometheus/procfs/swaps.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Swap represents an entry in /proc/swaps.
-type Swap struct {
- Filename string
- Type string
- Size int
- Used int
- Priority int
-}
-
-// Swaps returns a slice of all configured swap devices on the system.
-func (fs FS) Swaps() ([]*Swap, error) {
- data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
- if err != nil {
- return nil, err
- }
- return parseSwaps(data)
-}
-
-func parseSwaps(info []byte) ([]*Swap, error) {
- swaps := []*Swap{}
- scanner := bufio.NewScanner(bytes.NewReader(info))
- scanner.Scan() // ignore header line
- for scanner.Scan() {
- swapString := scanner.Text()
- parsedSwap, err := parseSwapString(swapString)
- if err != nil {
- return nil, err
- }
- swaps = append(swaps, parsedSwap)
- }
-
- err := scanner.Err()
- return swaps, err
-}
-
-func parseSwapString(swapString string) (*Swap, error) {
- var err error
-
- swapFields := strings.Fields(swapString)
- swapLength := len(swapFields)
- if swapLength < 5 {
- return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
- }
-
- swap := &Swap{
- Filename: swapFields[0],
- Type: swapFields[1],
- }
-
- swap.Size, err = strconv.Atoi(swapFields[2])
- if err != nil {
- return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
- }
- swap.Used, err = strconv.Atoi(swapFields[3])
- if err != nil {
- return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
- }
- swap.Priority, err = strconv.Atoi(swapFields[4])
- if err != nil {
- return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
- }
-
- return swap, nil
-}
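
A usage sketch for the removed `Swaps` API (sizes in `/proc/swaps` are reported by the kernel in KiB):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	swaps, err := fs.Swaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range swaps {
		fmt.Printf("%s (%s): %d/%d KiB used, priority %d\n",
			s.Filename, s.Type, s.Used, s.Size, s.Priority)
	}
}
```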
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
deleted file mode 100644
index 80e0e947be..0000000000
--- a/vendor/github.com/prometheus/procfs/thread.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "os"
- "strconv"
-
- fsi "github.com/prometheus/procfs/internal/fs"
-)
-
-// Provide access to /proc/PID/task/TID files, for thread-specific values. Since
-// such files have the same structure as /proc/PID/ ones, the data structures
-// and the parsers for the latter may be reused.
-
-// AllThreads returns a list of all currently available threads under /proc/PID.
-func AllThreads(pid int) (Procs, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Procs{}, err
- }
- return fs.AllThreads(pid)
-}
-
-// AllThreads returns a list of all currently available threads for PID.
-func (fs FS) AllThreads(pid int) (Procs, error) {
- taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
- d, err := os.Open(taskPath)
- if err != nil {
- return Procs{}, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
- }
-
- t := Procs{}
- for _, n := range names {
- tid, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- continue
- }
-
- t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
- }
-
- return t, nil
-}
-
-// Thread returns a process for a given PID, TID.
-func (fs FS) Thread(pid, tid int) (Proc, error) {
- taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
- if _, err := os.Stat(taskPath); err != nil {
- return Proc{}, err
- }
- return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
-}
-
-// Thread returns a process for a given TID of Proc.
-func (proc Proc) Thread(tid int) (Proc, error) {
- tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
- if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
- return Proc{}, err
- }
- return Proc{PID: tid, fs: tfs}, nil
-}
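
A sketch of the removed thread helpers, listing the threads of the current process; `os.Getpid()` stands in for any PID of interest:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	// Each entry is a Proc rooted at /proc/<pid>/task, so the usual
	// per-process accessors read thread-level data.
	for _, t := range threads {
		fmt.Println("tid:", t.PID)
	}
}
```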
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
deleted file mode 100644
index 19ef02b8d4..0000000000
--- a/vendor/github.com/prometheus/procfs/ttar
+++ /dev/null
@@ -1,413 +0,0 @@
-#!/usr/bin/env bash
-
-# Purpose: plain text tar format
-# Limitations: - only suitable for text files, directories, and symlinks
-# - stores only filename, content, and mode
-# - not designed for untrusted input
-#
-# Note: must work with bash version 3.2 (macOS)
-
-# Copyright 2017 Roger Luethi
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -o errexit -o nounset
-
-# Sanitize environment (for instance, standard sorting of glob matches)
-export LC_ALL=C
-
-path=""
-CMD=""
-ARG_STRING="$*"
-
-#------------------------------------------------------------------------------
-# Not all sed implementations can work on null bytes. In order to make ttar
-# work out of the box on macOS, use Python as a stream editor.
-
-USE_PYTHON=0
-
-PYTHON_CREATE_FILTER=$(cat << 'PCF'
-#!/usr/bin/env python
-
-import re
-import sys
-
-for line in sys.stdin:
- line = re.sub(r'EOF', r'\EOF', line)
- line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
- line = re.sub('\x00', r'NULLBYTE', line)
- sys.stdout.write(line)
-PCF
-)
-
-PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
-#!/usr/bin/env python
-
-import re
-import sys
-
-for line in sys.stdin:
- line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
- line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
- line = re.sub(r'(?<!\\)EOF', '', line)
- line = re.sub(r'\\EOF', 'EOF', line)
- sys.stdout.write(line)
-PEF
-)
-
-function test_environment {
- if [ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]; then
- echo "WARNING sed unable to handle null bytes, using Python (slow)."
- if ! which python >/dev/null; then
- echo "ERROR Python not found. Aborting."
- exit 2
- fi
- USE_PYTHON=1
- fi
-}
-
-#------------------------------------------------------------------------------
-
-function usage {
- bname=$(basename "$0")
- cat << USAGE
-Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
- $bname -t -f <ARCHIVE> (list archive contents)
- $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
-
-Options:
- -C <DIR> (change directory)
- -v (verbose)
- --recursive-unlink (recursively delete existing directory if path
- collides with file or directory to extract)
-
-Example: Change to sysfs directory, create ttar file from fixtures directory
- $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
-USAGE
-exit "$1"
-}
-
-function vecho {
- if [ "${VERBOSE:-}" == "yes" ]; then
- echo >&7 "$@"
- fi
-}
-
-function set_cmd {
- if [ -n "$CMD" ]; then
- echo "ERROR: more than one command given"
- echo
- usage 2
- fi
- CMD=$1
-}
-
-unset VERBOSE
-unset RECURSIVE_UNLINK
-
-while getopts :cf:-:htxvC: opt; do
- case $opt in
- c)
- set_cmd "create"
- ;;
- f)
- ARCHIVE=$OPTARG
- ;;
- h)
- usage 0
- ;;
- t)
- set_cmd "list"
- ;;
- x)
- set_cmd "extract"
- ;;
- v)
- VERBOSE=yes
- exec 7>&1
- ;;
- C)
- CDIR=$OPTARG
- ;;
- -)
- case $OPTARG in
- recursive-unlink)
- RECURSIVE_UNLINK="yes"
- ;;
- *)
- echo -e "Error: invalid option -$OPTARG"
- echo
- usage 1
- ;;
- esac
- ;;
- *)
- echo >&2 "ERROR: invalid option -$OPTARG"
- echo
- usage 1
- ;;
- esac
-done
-
-# Remove processed options from arguments
-shift $(( OPTIND - 1 ));
-
-if [ "${CMD:-}" == "" ]; then
- echo >&2 "ERROR: no command given"
- echo
- usage 1
-elif [ "${ARCHIVE:-}" == "" ]; then
- echo >&2 "ERROR: no archive name given"
- echo
- usage 1
-fi
-
-function list {
- local path=""
- local size=0
- local line_no=0
- local ttar_file=$1
- if [ -n "${2:-}" ]; then
- echo >&2 "ERROR: too many arguments."
- echo
- usage 1
- fi
- if [ ! -e "$ttar_file" ]; then
- echo >&2 "ERROR: file not found ($ttar_file)"
- echo
- usage 1
- fi
- while read -r line; do
- line_no=$(( line_no + 1 ))
- if [ $size -gt 0 ]; then
- size=$(( size - 1 ))
- continue
- fi
- if [[ $line =~ ^Path:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
- size=${BASH_REMATCH[1]}
- echo "$path"
- elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- echo "$path/"
- elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
- echo "$path -> ${BASH_REMATCH[1]}"
- fi
- done < "$ttar_file"
-}
-
-function extract {
- local path=""
- local size=0
- local line_no=0
- local ttar_file=$1
- if [ -n "${2:-}" ]; then
- echo >&2 "ERROR: too many arguments."
- echo
- usage 1
- fi
- if [ ! -e "$ttar_file" ]; then
- echo >&2 "ERROR: file not found ($ttar_file)"
- echo
- usage 1
- fi
- while IFS= read -r line; do
- line_no=$(( line_no + 1 ))
- local eof_without_newline
- if [ "$size" -gt 0 ]; then
- if [[ "$line" =~ [^\\]EOF ]]; then
- # An EOF not preceded by a backslash indicates that the line
- # does not end with a newline
- eof_without_newline=1
- else
- eof_without_newline=0
- fi
- # Replace NULLBYTE with null byte if at beginning of line
- # Replace NULLBYTE with null byte unless preceded by backslash
- # Remove one backslash in front of NULLBYTE (if any)
- # Remove EOF unless preceded by backslash
- # Remove one backslash in front of EOF
- if [ $USE_PYTHON -eq 1 ]; then
- echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
- else
- # The repeated pattern makes up for sed's lack of negative
- # lookbehind assertions (for consecutive null bytes).
- echo -n "$line" | \
- sed -e 's/^NULLBYTE/\x0/g;
- s/\([^\\]\)NULLBYTE/\1\x0/g;
- s/\([^\\]\)NULLBYTE/\1\x0/g;
- s/\\NULLBYTE/NULLBYTE/g;
- s/\([^\\]\)EOF/\1/g;
- s/\\EOF/EOF/g;
- ' >> "$path"
- fi
- if [[ "$eof_without_newline" -eq 0 ]]; then
- echo >> "$path"
- fi
- size=$(( size - 1 ))
- continue
- fi
- if [[ $line =~ ^Path:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- if [ -L "$path" ]; then
- rm "$path"
- elif [ -d "$path" ]; then
- if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
- rm -r "$path"
- else
- # Safe because symlinks to directories are dealt with above
- rmdir "$path"
- fi
- elif [ -e "$path" ]; then
- rm "$path"
- fi
- elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
- size=${BASH_REMATCH[1]}
- # Create file even if it is zero-length.
- touch "$path"
- vecho " $path"
- elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
- mode=${BASH_REMATCH[1]}
- chmod "$mode" "$path"
- vecho "$mode"
- elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
- path=${BASH_REMATCH[1]}
- mkdir -p "$path"
- vecho " $path/"
- elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
- ln -s "${BASH_REMATCH[1]}" "$path"
- vecho " $path -> ${BASH_REMATCH[1]}"
- elif [[ $line =~ ^# ]]; then
- # Ignore comments between files
- continue
- else
- echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
- exit 1
- fi
- done < "$ttar_file"
-}
-
-function div {
- echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
- "- - - - - -"
-}
-
-function get_mode {
- local mfile=$1
- if [ -z "${STAT_OPTION:-}" ]; then
- if stat -c '%a' "$mfile" >/dev/null 2>&1; then
- # GNU stat
- STAT_OPTION='-c'
- STAT_FORMAT='%a'
- else
- # BSD stat
- STAT_OPTION='-f'
- # Octal output, user/group/other (omit file type, sticky bit)
- STAT_FORMAT='%OLp'
- fi
- fi
- stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
-}
-
-function _create {
- shopt -s nullglob
- local mode
- local eof_without_newline
- while (( "$#" )); do
- file=$1
- if [ -L "$file" ]; then
- echo "Path: $file"
- symlinkTo=$(readlink "$file")
- echo "SymlinkTo: $symlinkTo"
- vecho " $file -> $symlinkTo"
- div
- elif [ -d "$file" ]; then
- # Strip trailing slash (if there is one)
- file=${file%/}
- echo "Directory: $file"
- mode=$(get_mode "$file")
- echo "Mode: $mode"
- vecho "$mode $file/"
- div
- # Find all files and dirs, including hidden/dot files
- for x in "$file/"{*,.[^.]*}; do
- _create "$x"
- done
- elif [ -f "$file" ]; then
- echo "Path: $file"
- lines=$(wc -l "$file"|awk '{print $1}')
- eof_without_newline=0
- if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
- [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
- eof_without_newline=1
- lines=$((lines+1))
- fi
- echo "Lines: $lines"
- # Add backslash in front of EOF
- # Add backslash in front of NULLBYTE
- # Replace null byte with NULLBYTE
- if [ $USE_PYTHON -eq 1 ]; then
- < "$file" python -c "$PYTHON_CREATE_FILTER"
- else
- < "$file" \
- sed 's/EOF/\\EOF/g;
- s/NULLBYTE/\\NULLBYTE/g;
- s/\x0/NULLBYTE/g;
- '
- fi
- if [[ "$eof_without_newline" -eq 1 ]]; then
- # Finish line with EOF to indicate that the original line did
- # not end with a linefeed
- echo "EOF"
- fi
- mode=$(get_mode "$file")
- echo "Mode: $mode"
- vecho "$mode $file"
- div
- else
- echo >&2 "ERROR: file not found ($file in $(pwd))"
- exit 2
- fi
- shift
- done
-}
-
-function create {
- ttar_file=$1
- shift
- if [ -z "${1:-}" ]; then
- echo >&2 "ERROR: missing arguments."
- echo
- usage 1
- fi
- if [ -e "$ttar_file" ]; then
- rm "$ttar_file"
- fi
- exec > "$ttar_file"
- echo "# Archive created by ttar $ARG_STRING"
- _create "$@"
-}
-
-test_environment
-
-if [ -n "${CDIR:-}" ]; then
- if [[ "$ARCHIVE" != /* ]]; then
- # Relative path: preserve the archive's location before changing
- # directory
- ARCHIVE="$(pwd)/$ARCHIVE"
- fi
- cd "$CDIR"
-fi
-
-"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
deleted file mode 100644
index 51c49d89e8..0000000000
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package procfs
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// The VM interface is described at
-//
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-//
-// Each setting is exposed as a single file.
-// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
-// and numa_zonelist_order (deprecated) which is a string.
-type VM struct {
- AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
- BlockDump *int64 // /proc/sys/vm/block_dump
- CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
- DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
- DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
- DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
- DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
- DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
- DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
- DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
- DropCaches *int64 // /proc/sys/vm/drop_caches
- ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
- HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
- LaptopMode *int64 // /proc/sys/vm/laptop_mode
- LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
- LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
- MaxMapCount *int64 // /proc/sys/vm/max_map_count
- MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
- MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
- MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
- MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
- MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
- MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
- NrHugepages *int64 // /proc/sys/vm/nr_hugepages
- NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
- NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
- NumaStat *int64 // /proc/sys/vm/numa_stat
- NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
- OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
- OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
- OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
- OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
- OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
- PageCluster *int64 // /proc/sys/vm/page-cluster
- PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
- PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
- StatInterval *int64 // /proc/sys/vm/stat_interval
- Swappiness *int64 // /proc/sys/vm/swappiness
- UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
- VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
- WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
- WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
- ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
-}
-
-// VM reads the VM statistics from the specified `proc` filesystem.
-func (fs FS) VM() (*VM, error) {
- path := fs.proc.Path("sys/vm")
- file, err := os.Stat(path)
- if err != nil {
- return nil, err
- }
- if !file.Mode().IsDir() {
- return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
- }
-
- files, err := os.ReadDir(path)
- if err != nil {
- return nil, err
- }
-
- var vm VM
- for _, f := range files {
- if f.IsDir() {
- continue
- }
-
- name := filepath.Join(path, f.Name())
- // Ignore read errors, as some files in /proc/sys/vm are write-only.
- value, err := util.SysReadFile(name)
- if err != nil {
- continue
- }
- vp := util.NewValueParser(value)
-
- switch f.Name() {
- case "admin_reserve_kbytes":
- vm.AdminReserveKbytes = vp.PInt64()
- case "block_dump":
- vm.BlockDump = vp.PInt64()
- case "compact_unevictable_allowed":
- vm.CompactUnevictableAllowed = vp.PInt64()
- case "dirty_background_bytes":
- vm.DirtyBackgroundBytes = vp.PInt64()
- case "dirty_background_ratio":
- vm.DirtyBackgroundRatio = vp.PInt64()
- case "dirty_bytes":
- vm.DirtyBytes = vp.PInt64()
- case "dirty_expire_centisecs":
- vm.DirtyExpireCentisecs = vp.PInt64()
- case "dirty_ratio":
- vm.DirtyRatio = vp.PInt64()
- case "dirtytime_expire_seconds":
- vm.DirtytimeExpireSeconds = vp.PInt64()
- case "dirty_writeback_centisecs":
- vm.DirtyWritebackCentisecs = vp.PInt64()
- case "drop_caches":
- vm.DropCaches = vp.PInt64()
- case "extfrag_threshold":
- vm.ExtfragThreshold = vp.PInt64()
- case "hugetlb_shm_group":
- vm.HugetlbShmGroup = vp.PInt64()
- case "laptop_mode":
- vm.LaptopMode = vp.PInt64()
- case "legacy_va_layout":
- vm.LegacyVaLayout = vp.PInt64()
- case "lowmem_reserve_ratio":
- stringSlice := strings.Fields(value)
- pint64Slice := make([]*int64, 0, len(stringSlice))
- for _, value := range stringSlice {
- vp := util.NewValueParser(value)
- pint64Slice = append(pint64Slice, vp.PInt64())
- }
- vm.LowmemReserveRatio = pint64Slice
- case "max_map_count":
- vm.MaxMapCount = vp.PInt64()
- case "memory_failure_early_kill":
- vm.MemoryFailureEarlyKill = vp.PInt64()
- case "memory_failure_recovery":
- vm.MemoryFailureRecovery = vp.PInt64()
- case "min_free_kbytes":
- vm.MinFreeKbytes = vp.PInt64()
- case "min_slab_ratio":
- vm.MinSlabRatio = vp.PInt64()
- case "min_unmapped_ratio":
- vm.MinUnmappedRatio = vp.PInt64()
- case "mmap_min_addr":
- vm.MmapMinAddr = vp.PInt64()
- case "nr_hugepages":
- vm.NrHugepages = vp.PInt64()
- case "nr_hugepages_mempolicy":
- vm.NrHugepagesMempolicy = vp.PInt64()
- case "nr_overcommit_hugepages":
- vm.NrOvercommitHugepages = vp.PInt64()
- case "numa_stat":
- vm.NumaStat = vp.PInt64()
- case "numa_zonelist_order":
- vm.NumaZonelistOrder = value
- case "oom_dump_tasks":
- vm.OomDumpTasks = vp.PInt64()
- case "oom_kill_allocating_task":
- vm.OomKillAllocatingTask = vp.PInt64()
- case "overcommit_kbytes":
- vm.OvercommitKbytes = vp.PInt64()
- case "overcommit_memory":
- vm.OvercommitMemory = vp.PInt64()
- case "overcommit_ratio":
- vm.OvercommitRatio = vp.PInt64()
- case "page-cluster":
- vm.PageCluster = vp.PInt64()
- case "panic_on_oom":
- vm.PanicOnOom = vp.PInt64()
- case "percpu_pagelist_fraction":
- vm.PercpuPagelistFraction = vp.PInt64()
- case "stat_interval":
- vm.StatInterval = vp.PInt64()
- case "swappiness":
- vm.Swappiness = vp.PInt64()
- case "user_reserve_kbytes":
- vm.UserReserveKbytes = vp.PInt64()
- case "vfs_cache_pressure":
- vm.VfsCachePressure = vp.PInt64()
- case "watermark_boost_factor":
- vm.WatermarkBoostFactor = vp.PInt64()
- case "watermark_scale_factor":
- vm.WatermarkScaleFactor = vp.PInt64()
- case "zone_reclaim_mode":
- vm.ZoneReclaimMode = vp.PInt64()
- }
- if err := vp.Err(); err != nil {
- return nil, err
- }
- }
-
- return &vm, nil
-}
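
A sketch of the removed `VM` API; all numeric fields are pointers, so a nil check distinguishes an absent or unreadable setting from a zero value:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	if vm.Swappiness != nil {
		fmt.Println("vm.swappiness =", *vm.Swappiness)
	}
}
```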
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
deleted file mode 100644
index e54d94b090..0000000000
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "os"
- "regexp"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Zoneinfo holds info parsed from /proc/zoneinfo.
-type Zoneinfo struct {
- Node string
- Zone string
- NrFreePages *int64
- Min *int64
- Low *int64
- High *int64
- Scanned *int64
- Spanned *int64
- Present *int64
- Managed *int64
- NrActiveAnon *int64
- NrInactiveAnon *int64
- NrIsolatedAnon *int64
- NrAnonPages *int64
- NrAnonTransparentHugepages *int64
- NrActiveFile *int64
- NrInactiveFile *int64
- NrIsolatedFile *int64
- NrFilePages *int64
- NrSlabReclaimable *int64
- NrSlabUnreclaimable *int64
- NrMlockStack *int64
- NrKernelStack *int64
- NrMapped *int64
- NrDirty *int64
- NrWriteback *int64
- NrUnevictable *int64
- NrShmem *int64
- NrDirtied *int64
- NrWritten *int64
- NumaHit *int64
- NumaMiss *int64
- NumaForeign *int64
- NumaInterleave *int64
- NumaLocal *int64
- NumaOther *int64
- Protection []*int64
-}
-
-var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
-
-// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
- data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
- if err != nil {
- return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
- }
- zoneinfo, err := parseZoneinfo(data)
- if err != nil {
- return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
- }
- return zoneinfo, nil
-}
-
-func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
-
- zoneinfo := []Zoneinfo{}
-
- zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
- for _, block := range zoneinfoBlocks {
- var zoneinfoElement Zoneinfo
- lines := strings.Split(string(block), "\n")
- for _, line := range lines {
-
- if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
- zoneinfoElement.Node = nodeZone[1]
- zoneinfoElement.Zone = nodeZone[2]
- continue
- }
- if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
- continue
- }
- parts := strings.Fields(strings.TrimSpace(line))
- if len(parts) < 2 {
- continue
- }
- vp := util.NewValueParser(parts[1])
- switch parts[0] {
- case "nr_free_pages":
- zoneinfoElement.NrFreePages = vp.PInt64()
- case "min":
- zoneinfoElement.Min = vp.PInt64()
- case "low":
- zoneinfoElement.Low = vp.PInt64()
- case "high":
- zoneinfoElement.High = vp.PInt64()
- case "scanned":
- zoneinfoElement.Scanned = vp.PInt64()
- case "spanned":
- zoneinfoElement.Spanned = vp.PInt64()
- case "present":
- zoneinfoElement.Present = vp.PInt64()
- case "managed":
- zoneinfoElement.Managed = vp.PInt64()
- case "nr_active_anon":
- zoneinfoElement.NrActiveAnon = vp.PInt64()
- case "nr_inactive_anon":
- zoneinfoElement.NrInactiveAnon = vp.PInt64()
- case "nr_isolated_anon":
- zoneinfoElement.NrIsolatedAnon = vp.PInt64()
- case "nr_anon_pages":
- zoneinfoElement.NrAnonPages = vp.PInt64()
- case "nr_anon_transparent_hugepages":
- zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
- case "nr_active_file":
- zoneinfoElement.NrActiveFile = vp.PInt64()
- case "nr_inactive_file":
- zoneinfoElement.NrInactiveFile = vp.PInt64()
- case "nr_isolated_file":
- zoneinfoElement.NrIsolatedFile = vp.PInt64()
- case "nr_file_pages":
- zoneinfoElement.NrFilePages = vp.PInt64()
- case "nr_slab_reclaimable":
- zoneinfoElement.NrSlabReclaimable = vp.PInt64()
- case "nr_slab_unreclaimable":
- zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
- case "nr_mlock_stack":
- zoneinfoElement.NrMlockStack = vp.PInt64()
- case "nr_kernel_stack":
- zoneinfoElement.NrKernelStack = vp.PInt64()
- case "nr_mapped":
- zoneinfoElement.NrMapped = vp.PInt64()
- case "nr_dirty":
- zoneinfoElement.NrDirty = vp.PInt64()
- case "nr_writeback":
- zoneinfoElement.NrWriteback = vp.PInt64()
- case "nr_unevictable":
- zoneinfoElement.NrUnevictable = vp.PInt64()
- case "nr_shmem":
- zoneinfoElement.NrShmem = vp.PInt64()
- case "nr_dirtied":
- zoneinfoElement.NrDirtied = vp.PInt64()
- case "nr_written":
- zoneinfoElement.NrWritten = vp.PInt64()
- case "numa_hit":
- zoneinfoElement.NumaHit = vp.PInt64()
- case "numa_miss":
- zoneinfoElement.NumaMiss = vp.PInt64()
- case "numa_foreign":
- zoneinfoElement.NumaForeign = vp.PInt64()
- case "numa_interleave":
- zoneinfoElement.NumaInterleave = vp.PInt64()
- case "numa_local":
- zoneinfoElement.NumaLocal = vp.PInt64()
- case "numa_other":
- zoneinfoElement.NumaOther = vp.PInt64()
- case "protection:":
- protectionParts := strings.Split(line, ":")
- protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
- protectionValues = strings.Replace(protectionValues, ")", "", 1)
- protectionValues = strings.TrimSpace(protectionValues)
- protectionStringMap := strings.Split(protectionValues, ", ")
- val, err := util.ParsePInt64s(protectionStringMap)
- if err == nil {
- zoneinfoElement.Protection = val
- }
- }
-
- }
-
- zoneinfo = append(zoneinfo, zoneinfoElement)
- }
- return zoneinfo, nil
-}
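
A sketch of the removed `Zoneinfo` API; as with `VM`, the counter fields are pointers and may be nil when a value is absent for a zone:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	zones, err := fs.Zoneinfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones {
		if z.NrFreePages != nil {
			fmt.Printf("node %s, zone %s: %d free pages\n",
				z.Node, z.Zone, *z.NrFreePages)
		}
	}
}
```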
diff --git a/vendor/github.com/rabbitmq/amqp091-go/.gitignore b/vendor/github.com/rabbitmq/amqp091-go/.gitignore
new file mode 100644
index 0000000000..a93cced225
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/.gitignore
@@ -0,0 +1,6 @@
+certs/*
+spec/spec
+examples/simple-consumer/simple-consumer
+examples/simple-producer/simple-producer
+
+.idea/
diff --git a/vendor/github.com/rabbitmq/amqp091-go/.golangci.yml b/vendor/github.com/rabbitmq/amqp091-go/.golangci.yml
new file mode 100644
index 0000000000..4341bcf984
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/.golangci.yml
@@ -0,0 +1,3 @@
+run:
+ build-tags:
+ - integration
diff --git a/vendor/github.com/rabbitmq/amqp091-go/CHANGELOG.md b/vendor/github.com/rabbitmq/amqp091-go/CHANGELOG.md
new file mode 100644
index 0000000000..db633d44a0
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/CHANGELOG.md
@@ -0,0 +1,295 @@
+# Changelog
+
+## [v1.8.1](https://github.com/rabbitmq/amqp091-go/tree/v1.8.1) (2023-05-04)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.8.0...v1.8.1)
+
+**Fixed bugs:**
+
+- Fixed incorrect version reported in client properties [52ce2efd03c53dcf77d5496977da46840e9abd24](https://github.com/rabbitmq/amqp091-go/commit/52ce2efd03c53dcf77d5496977da46840e9abd24)
+
+**Merged pull requests:**
+
+- Fix Example Client not reconnecting [\#186](https://github.com/rabbitmq/amqp091-go/pull/186) ([frankfil](https://github.com/frankfil))
+
+## [v1.8.0](https://github.com/rabbitmq/amqp091-go/tree/v1.8.0) (2023-03-21)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.7.0...v1.8.0)
+
+**Closed issues:**
+
+- memory leak [\#179](https://github.com/rabbitmq/amqp091-go/issues/179)
+- the publishWithContext interface will not return when it times out [\#178](https://github.com/rabbitmq/amqp091-go/issues/178)
+
+**Merged pull requests:**
+
+- Fix race condition on confirms [\#183](https://github.com/rabbitmq/amqp091-go/pull/183) ([calloway-jacob](https://github.com/calloway-jacob))
+- Add a CloseDeadline function to Connection [\#181](https://github.com/rabbitmq/amqp091-go/pull/181) ([Zerpet](https://github.com/Zerpet))
+- Fix memory leaks [\#180](https://github.com/rabbitmq/amqp091-go/pull/180) ([GXKe](https://github.com/GXKe))
+- Bump go.uber.org/goleak from 1.2.0 to 1.2.1 [\#177](https://github.com/rabbitmq/amqp091-go/pull/177) ([dependabot[bot]](https://github.com/apps/dependabot))
+
+## [v1.7.0](https://github.com/rabbitmq/amqp091-go/tree/v1.7.0) (2023-02-09)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.6.1...v1.7.0)
+
+**Closed issues:**
+
+- \#31 resurfacing \(?\) [\#170](https://github.com/rabbitmq/amqp091-go/issues/170)
+- Deprecate QueueInspect [\#167](https://github.com/rabbitmq/amqp091-go/issues/167)
+- v1.6.0 causing rabbit connection errors [\#160](https://github.com/rabbitmq/amqp091-go/issues/160)
+
+**Merged pull requests:**
+
+- Set channels and allocator to nil in shutdown [\#172](https://github.com/rabbitmq/amqp091-go/pull/172) ([lukebakken](https://github.com/lukebakken))
+- Fix racing in Open [\#171](https://github.com/rabbitmq/amqp091-go/pull/171) ([Zerpet](https://github.com/Zerpet))
+- adding go 1.20 to tests [\#169](https://github.com/rabbitmq/amqp091-go/pull/169) ([halilylm](https://github.com/halilylm))
+- Deprecate the QueueInspect function [\#168](https://github.com/rabbitmq/amqp091-go/pull/168) ([lukebakken](https://github.com/lukebakken))
+- Check if channel is nil before updating it [\#150](https://github.com/rabbitmq/amqp091-go/pull/150) ([julienschmidt](https://github.com/julienschmidt))
+
+## [v1.6.1](https://github.com/rabbitmq/amqp091-go/tree/v1.6.1) (2023-02-01)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.6.1-rc.2...v1.6.1)
+
+**Merged pull requests:**
+
+- Update Makefile targets related to RabbitMQ [\#163](https://github.com/rabbitmq/amqp091-go/pull/163) ([Zerpet](https://github.com/Zerpet))
+
+## [v1.6.1-rc.2](https://github.com/rabbitmq/amqp091-go/tree/v1.6.1-rc.2) (2023-01-31)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.6.1-rc.1...v1.6.1-rc.2)
+
+**Merged pull requests:**
+
+- Do not overly protect writes [\#162](https://github.com/rabbitmq/amqp091-go/pull/162) ([lukebakken](https://github.com/lukebakken))
+
+## [v1.6.1-rc.1](https://github.com/rabbitmq/amqp091-go/tree/v1.6.1-rc.1) (2023-01-31)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.6.0...v1.6.1-rc.1)
+
+**Closed issues:**
+
+- Calling Channel\(\) on an empty connection panics [\#148](https://github.com/rabbitmq/amqp091-go/issues/148)
+
+**Merged pull requests:**
+
+- Ensure flush happens and correctly lock connection for a series of unflushed writes [\#161](https://github.com/rabbitmq/amqp091-go/pull/161) ([lukebakken](https://github.com/lukebakken))
+
+## [v1.6.0](https://github.com/rabbitmq/amqp091-go/tree/v1.6.0) (2023-01-20)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.5.0...v1.6.0)
+
+**Implemented enhancements:**
+
+- Add constants for Queue arguments [\#145](https://github.com/rabbitmq/amqp091-go/pull/145) ([Zerpet](https://github.com/Zerpet))
+
+**Closed issues:**
+
+- README not up to date [\#154](https://github.com/rabbitmq/amqp091-go/issues/154)
+- Allow re-using default connection config \(custom properties\) [\#152](https://github.com/rabbitmq/amqp091-go/issues/152)
+- Rename package name to amqp in V2 [\#151](https://github.com/rabbitmq/amqp091-go/issues/151)
+- Helper types to declare quorum queues [\#144](https://github.com/rabbitmq/amqp091-go/issues/144)
+- Inefficient use of buffers reduces potential throughput for basicPublish with small messages. [\#141](https://github.com/rabbitmq/amqp091-go/issues/141)
+- bug, close cause panic [\#130](https://github.com/rabbitmq/amqp091-go/issues/130)
+- Publishing Headers are unable to store Table with slice values [\#125](https://github.com/rabbitmq/amqp091-go/issues/125)
+- Example client can deadlock in Close due to unconsumed confirmations [\#122](https://github.com/rabbitmq/amqp091-go/issues/122)
+- SAC not working properly [\#106](https://github.com/rabbitmq/amqp091-go/issues/106)
+
+**Merged pull requests:**
+
+- Add automatic CHANGELOG.md generation [\#158](https://github.com/rabbitmq/amqp091-go/pull/158) ([lukebakken](https://github.com/lukebakken))
+- Supply library-defined props with NewConnectionProperties [\#157](https://github.com/rabbitmq/amqp091-go/pull/157) ([slagiewka](https://github.com/slagiewka))
+- Fix linter warnings [\#156](https://github.com/rabbitmq/amqp091-go/pull/156) ([Zerpet](https://github.com/Zerpet))
+- Remove outdated information from README [\#155](https://github.com/rabbitmq/amqp091-go/pull/155) ([scriptcoded](https://github.com/scriptcoded))
+- Add example producer using DeferredConfirm [\#149](https://github.com/rabbitmq/amqp091-go/pull/149) ([Zerpet](https://github.com/Zerpet))
+- Ensure code is formatted [\#147](https://github.com/rabbitmq/amqp091-go/pull/147) ([lukebakken](https://github.com/lukebakken))
+- Fix inefficient use of buffers that reduces the potential throughput of basicPublish [\#142](https://github.com/rabbitmq/amqp091-go/pull/142) ([fadams](https://github.com/fadams))
+- Do not embed context in DeferredConfirmation [\#140](https://github.com/rabbitmq/amqp091-go/pull/140) ([tie](https://github.com/tie))
+- Add constant for default exchange [\#139](https://github.com/rabbitmq/amqp091-go/pull/139) ([marlongerson](https://github.com/marlongerson))
+- Fix indentation and remove unnecessary instructions [\#138](https://github.com/rabbitmq/amqp091-go/pull/138) ([alraujo](https://github.com/alraujo))
+- Remove unnecessary instruction [\#135](https://github.com/rabbitmq/amqp091-go/pull/135) ([alraujo](https://github.com/alraujo))
+- Fix example client to avoid deadlock in Close [\#123](https://github.com/rabbitmq/amqp091-go/pull/123) ([Zerpet](https://github.com/Zerpet))
+- Bump go.uber.org/goleak from 1.1.12 to 1.2.0 [\#116](https://github.com/rabbitmq/amqp091-go/pull/116) ([dependabot[bot]](https://github.com/apps/dependabot))
+
+## [v1.5.0](https://github.com/rabbitmq/amqp091-go/tree/v1.5.0) (2022-09-07)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.4.0...v1.5.0)
+
+**Implemented enhancements:**
+
+- Provide a friendly way to set connection name [\#105](https://github.com/rabbitmq/amqp091-go/issues/105)
+
+**Closed issues:**
+
+- Support connection.update-secret [\#107](https://github.com/rabbitmq/amqp091-go/issues/107)
+- Example Client: Implementation of a Consumer with reconnection support [\#40](https://github.com/rabbitmq/amqp091-go/issues/40)
+
+**Merged pull requests:**
+
+- use PublishWithContext instead of Publish [\#115](https://github.com/rabbitmq/amqp091-go/pull/115) ([Gsantomaggio](https://github.com/Gsantomaggio))
+- Add support for connection.update-secret [\#114](https://github.com/rabbitmq/amqp091-go/pull/114) ([Zerpet](https://github.com/Zerpet))
+- Remove warning on RabbitMQ tutorials in go [\#113](https://github.com/rabbitmq/amqp091-go/pull/113) ([ChunyiLyu](https://github.com/ChunyiLyu))
+- Update AMQP Spec [\#110](https://github.com/rabbitmq/amqp091-go/pull/110) ([Zerpet](https://github.com/Zerpet))
+- Add an example of reliable consumer [\#109](https://github.com/rabbitmq/amqp091-go/pull/109) ([Zerpet](https://github.com/Zerpet))
+- Add convenience function to set connection name [\#108](https://github.com/rabbitmq/amqp091-go/pull/108) ([Zerpet](https://github.com/Zerpet))
+
+## [v1.4.0](https://github.com/rabbitmq/amqp091-go/tree/v1.4.0) (2022-07-19)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.3.4...v1.4.0)
+
+**Closed issues:**
+
+- target machine actively refused connection [\#99](https://github.com/rabbitmq/amqp091-go/issues/99)
+- 504 channel/connection is not open error occurred in multiple connection with same rabbitmq service [\#97](https://github.com/rabbitmq/amqp091-go/issues/97)
+- Add possible cancel of DeferredConfirmation [\#92](https://github.com/rabbitmq/amqp091-go/issues/92)
+- Documentation [\#89](https://github.com/rabbitmq/amqp091-go/issues/89)
+- Channel Close gets stuck after closing a connection \(via management UI\) [\#88](https://github.com/rabbitmq/amqp091-go/issues/88)
+- this library has same issue [\#83](https://github.com/rabbitmq/amqp091-go/issues/83)
+- Provide a logging interface [\#81](https://github.com/rabbitmq/amqp091-go/issues/81)
+- 1.4.0 release checklist [\#77](https://github.com/rabbitmq/amqp091-go/issues/77)
+- Data race in the client example [\#72](https://github.com/rabbitmq/amqp091-go/issues/72)
+- reader go routine hangs and leaks when Connection.Close\(\) is called multiple times [\#69](https://github.com/rabbitmq/amqp091-go/issues/69)
+- Support auto-reconnect and cluster [\#65](https://github.com/rabbitmq/amqp091-go/issues/65)
+- Connection/Channel Deadlock [\#32](https://github.com/rabbitmq/amqp091-go/issues/32)
+- Closing connection and/or channel hangs when NotifyPublish is used [\#21](https://github.com/rabbitmq/amqp091-go/issues/21)
+- Consumer channel isn't closed in the event of unexpected disconnection [\#18](https://github.com/rabbitmq/amqp091-go/issues/18)
+
+**Merged pull requests:**
+
+- fix race condition with context close and confirm at the same time on DeferredConfirmation. [\#101](https://github.com/rabbitmq/amqp091-go/pull/101) ([sapk](https://github.com/sapk))
+- Add build TLS config from URI [\#98](https://github.com/rabbitmq/amqp091-go/pull/98) ([reddec](https://github.com/reddec))
+- Use context for Publish methods [\#96](https://github.com/rabbitmq/amqp091-go/pull/96) ([sapk](https://github.com/sapk))
+- Added function to get the remote peer's IP address \(conn.RemoteAddr\(\)\) [\#95](https://github.com/rabbitmq/amqp091-go/pull/95) ([rabb1t](https://github.com/rabb1t))
+- Update connection documentation [\#90](https://github.com/rabbitmq/amqp091-go/pull/90) ([Zerpet](https://github.com/Zerpet))
+- Revert test to demonstrate actual bug [\#87](https://github.com/rabbitmq/amqp091-go/pull/87) ([lukebakken](https://github.com/lukebakken))
+- Minor improvements to examples [\#86](https://github.com/rabbitmq/amqp091-go/pull/86) ([lukebakken](https://github.com/lukebakken))
+- Do not skip flaky test in CI [\#85](https://github.com/rabbitmq/amqp091-go/pull/85) ([lukebakken](https://github.com/lukebakken))
+- Add logging [\#84](https://github.com/rabbitmq/amqp091-go/pull/84) ([lukebakken](https://github.com/lukebakken))
+- Add a win32 build [\#82](https://github.com/rabbitmq/amqp091-go/pull/82) ([lukebakken](https://github.com/lukebakken))
+- channel: return nothing instead of always a nil-error in receive methods [\#80](https://github.com/rabbitmq/amqp091-go/pull/80) ([fho](https://github.com/fho))
+- update the contributing & readme files, improve makefile [\#79](https://github.com/rabbitmq/amqp091-go/pull/79) ([fho](https://github.com/fho))
+- Fix lint errors [\#78](https://github.com/rabbitmq/amqp091-go/pull/78) ([lukebakken](https://github.com/lukebakken))
+- ci: run golangci-lint [\#76](https://github.com/rabbitmq/amqp091-go/pull/76) ([fho](https://github.com/fho))
+- ci: run test via make & remove travis CI config [\#75](https://github.com/rabbitmq/amqp091-go/pull/75) ([fho](https://github.com/fho))
+- ci: run tests with race detector [\#74](https://github.com/rabbitmq/amqp091-go/pull/74) ([fho](https://github.com/fho))
+- Detect go routine leaks in integration testcases [\#73](https://github.com/rabbitmq/amqp091-go/pull/73) ([fho](https://github.com/fho))
+- connection: fix: reader go-routine is leaked on connection close [\#70](https://github.com/rabbitmq/amqp091-go/pull/70) ([fho](https://github.com/fho))
+- adding best practises for NotifyPublish for issue\_21 scenario [\#68](https://github.com/rabbitmq/amqp091-go/pull/68) ([DanielePalaia](https://github.com/DanielePalaia))
+- Update Go version [\#67](https://github.com/rabbitmq/amqp091-go/pull/67) ([Zerpet](https://github.com/Zerpet))
+- Regenerate certs with SHA256 to fix test with Go 1.18+ [\#66](https://github.com/rabbitmq/amqp091-go/pull/66) ([anthonyfok](https://github.com/anthonyfok))
+
+## [v1.3.4](https://github.com/rabbitmq/amqp091-go/tree/v1.3.4) (2022-04-01)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.3.3...v1.3.4)
+
+**Merged pull requests:**
+
+- bump version to 1.3.4 [\#63](https://github.com/rabbitmq/amqp091-go/pull/63) ([DanielePalaia](https://github.com/DanielePalaia))
+- updating doc [\#62](https://github.com/rabbitmq/amqp091-go/pull/62) ([DanielePalaia](https://github.com/DanielePalaia))
+
+## [v1.3.3](https://github.com/rabbitmq/amqp091-go/tree/v1.3.3) (2022-04-01)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.3.2...v1.3.3)
+
+**Closed issues:**
+
+- Add Client Version [\#49](https://github.com/rabbitmq/amqp091-go/issues/49)
+- OpenTelemetry Propagation [\#22](https://github.com/rabbitmq/amqp091-go/issues/22)
+
+**Merged pull requests:**
+
+- bump buildVersion for release [\#61](https://github.com/rabbitmq/amqp091-go/pull/61) ([DanielePalaia](https://github.com/DanielePalaia))
+- adding documentation for notifyClose best practices [\#60](https://github.com/rabbitmq/amqp091-go/pull/60) ([DanielePalaia](https://github.com/DanielePalaia))
+- adding documentation on NotifyClose of connection and channel to enfo… [\#59](https://github.com/rabbitmq/amqp091-go/pull/59) ([DanielePalaia](https://github.com/DanielePalaia))
+
+## [v1.3.2](https://github.com/rabbitmq/amqp091-go/tree/v1.3.2) (2022-03-28)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.3.1...v1.3.2)
+
+**Closed issues:**
+
+- Potential race condition in Connection module [\#31](https://github.com/rabbitmq/amqp091-go/issues/31)
+
+**Merged pull requests:**
+
+- bump versioning to 1.3.2 [\#58](https://github.com/rabbitmq/amqp091-go/pull/58) ([DanielePalaia](https://github.com/DanielePalaia))
+
+## [v1.3.1](https://github.com/rabbitmq/amqp091-go/tree/v1.3.1) (2022-03-25)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.3.0...v1.3.1)
+
+**Closed issues:**
+
+- Possible deadlock on DeferredConfirmation.Wait\(\) [\#46](https://github.com/rabbitmq/amqp091-go/issues/46)
+- Call to Delivery.Ack blocks indefinitely in case of disconnection [\#19](https://github.com/rabbitmq/amqp091-go/issues/19)
+- Unexpected behavior of channel.IsClosed\(\) [\#14](https://github.com/rabbitmq/amqp091-go/issues/14)
+- A possible dead lock in connection close notification Go channel [\#11](https://github.com/rabbitmq/amqp091-go/issues/11)
+
+**Merged pull requests:**
+
+- These ones were the ones testing Open scenarios. The issue is that Op… [\#57](https://github.com/rabbitmq/amqp091-go/pull/57) ([DanielePalaia](https://github.com/DanielePalaia))
+- changing defaultVersion to buildVersion and create a simple change\_ve… [\#54](https://github.com/rabbitmq/amqp091-go/pull/54) ([DanielePalaia](https://github.com/DanielePalaia))
+- adding integration test for issue 11 [\#50](https://github.com/rabbitmq/amqp091-go/pull/50) ([DanielePalaia](https://github.com/DanielePalaia))
+- Remove the old link product [\#48](https://github.com/rabbitmq/amqp091-go/pull/48) ([Gsantomaggio](https://github.com/Gsantomaggio))
+- Fix deadlock on DeferredConfirmations [\#47](https://github.com/rabbitmq/amqp091-go/pull/47) ([SpencerTorres](https://github.com/SpencerTorres))
+- Example client: Rename Stream\(\) to Consume\(\) to avoid confusion with RabbitMQ streams [\#39](https://github.com/rabbitmq/amqp091-go/pull/39) ([andygrunwald](https://github.com/andygrunwald))
+- Example client: Rename `name` to `queueName` to make the usage clear and explicit [\#38](https://github.com/rabbitmq/amqp091-go/pull/38) ([andygrunwald](https://github.com/andygrunwald))
+- Client example: Renamed concept "Session" to "Client" [\#37](https://github.com/rabbitmq/amqp091-go/pull/37) ([andygrunwald](https://github.com/andygrunwald))
+- delete unuseful code [\#36](https://github.com/rabbitmq/amqp091-go/pull/36) ([liutaot](https://github.com/liutaot))
+- Client Example: Fix closing order [\#35](https://github.com/rabbitmq/amqp091-go/pull/35) ([andygrunwald](https://github.com/andygrunwald))
+- Client example: Use instance logger instead of global logger [\#34](https://github.com/rabbitmq/amqp091-go/pull/34) ([andygrunwald](https://github.com/andygrunwald))
+
+## [v1.3.0](https://github.com/rabbitmq/amqp091-go/tree/v1.3.0) (2022-01-13)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.2.0...v1.3.0)
+
+**Closed issues:**
+
+- documentation of changes triggering version updates [\#29](https://github.com/rabbitmq/amqp091-go/issues/29)
+- Persistent messages folder [\#27](https://github.com/rabbitmq/amqp091-go/issues/27)
+
+**Merged pull requests:**
+
+- Expose a method to enable out-of-order Publisher Confirms [\#33](https://github.com/rabbitmq/amqp091-go/pull/33) ([benmoss](https://github.com/benmoss))
+- Fix Signed 8-bit headers being treated as unsigned [\#26](https://github.com/rabbitmq/amqp091-go/pull/26) ([alex-goodisman](https://github.com/alex-goodisman))
+
+## [v1.2.0](https://github.com/rabbitmq/amqp091-go/tree/v1.2.0) (2021-11-17)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/v1.1.0...v1.2.0)
+
+**Closed issues:**
+
+- No access to this vhost [\#24](https://github.com/rabbitmq/amqp091-go/issues/24)
+- copyright issue? [\#12](https://github.com/rabbitmq/amqp091-go/issues/12)
+- A possible dead lock when publishing message with confirmation [\#10](https://github.com/rabbitmq/amqp091-go/issues/10)
+- Semver release [\#7](https://github.com/rabbitmq/amqp091-go/issues/7)
+
+**Merged pull requests:**
+
+- Fix deadlock between publishing and receiving confirms [\#25](https://github.com/rabbitmq/amqp091-go/pull/25) ([benmoss](https://github.com/benmoss))
+- Add GetNextPublishSeqNo for channel in confirm mode [\#23](https://github.com/rabbitmq/amqp091-go/pull/23) ([kamal-github](https://github.com/kamal-github))
+- Added support for cert-only login without user and password [\#20](https://github.com/rabbitmq/amqp091-go/pull/20) ([mihaitodor](https://github.com/mihaitodor))
+
+## [v1.1.0](https://github.com/rabbitmq/amqp091-go/tree/v1.1.0) (2021-09-21)
+
+[Full Changelog](https://github.com/rabbitmq/amqp091-go/compare/ebd83429aa8cb06fa569473f623e87675f96d3a9...v1.1.0)
+
+**Closed issues:**
+
+- AMQPLAIN authentication does not work [\#15](https://github.com/rabbitmq/amqp091-go/issues/15)
+
+**Merged pull requests:**
+
+- Fix AMQPLAIN authentication mechanism [\#16](https://github.com/rabbitmq/amqp091-go/pull/16) ([hodbn](https://github.com/hodbn))
+- connection: clarify documented behavior of NotifyClose [\#13](https://github.com/rabbitmq/amqp091-go/pull/13) ([pabigot](https://github.com/pabigot))
+- Add a link to pkg.go.dev API docs [\#9](https://github.com/rabbitmq/amqp091-go/pull/9) ([benmoss](https://github.com/benmoss))
+- add test go version 1.16.x and 1.17.x [\#8](https://github.com/rabbitmq/amqp091-go/pull/8) ([k4n4ry](https://github.com/k4n4ry))
+- fix typos [\#6](https://github.com/rabbitmq/amqp091-go/pull/6) ([h44z](https://github.com/h44z))
+- Heartbeat interval should be timeout/2 [\#5](https://github.com/rabbitmq/amqp091-go/pull/5) ([ifo20](https://github.com/ifo20))
+- Exporting Channel State [\#4](https://github.com/rabbitmq/amqp091-go/pull/4) ([eibrunorodrigues](https://github.com/eibrunorodrigues))
+- Add codeql analysis [\#3](https://github.com/rabbitmq/amqp091-go/pull/3) ([MirahImage](https://github.com/MirahImage))
+- Add PR github action. [\#2](https://github.com/rabbitmq/amqp091-go/pull/2) ([MirahImage](https://github.com/MirahImage))
+- Update Copyright Statement [\#1](https://github.com/rabbitmq/amqp091-go/pull/1) ([rlewis24](https://github.com/rlewis24))
+
+
+
+\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
diff --git a/vendor/github.com/rabbitmq/amqp091-go/CODE_OF_CONDUCT.md b/vendor/github.com/rabbitmq/amqp091-go/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..24b5675902
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/CODE_OF_CONDUCT.md
@@ -0,0 +1,77 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in the RabbitMQ Operator project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at oss-coc@vmware.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
+
diff --git a/vendor/github.com/rabbitmq/amqp091-go/CONTRIBUTING.md b/vendor/github.com/rabbitmq/amqp091-go/CONTRIBUTING.md
new file mode 100644
index 0000000000..ec86fe54c4
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/CONTRIBUTING.md
@@ -0,0 +1,62 @@
+# Contributing
+
+## Workflow
+
+Here is the recommended workflow:
+
+1. Fork this repository, **github.com/rabbitmq/amqp091-go**
+1. Create your feature branch (`git checkout -b my-new-feature`)
+1. Run Static Checks
+1. Run integration tests (see below)
+1. **Implement tests**
+1. Implement fixes
+1. Commit your changes. Use a [good, descriptive, commit message][good-commit].
+1. Push to a branch (`git push -u origin my-new-feature`)
+1. Submit a pull request
+
+[good-commit]: https://cbea.ms/git-commit/
+
+## Running Static Checks
+
+golangci-lint must be installed to run the static checks. See [installation
+docs](https://golangci-lint.run/usage/install/) for more information.
+
+The static checks can be run via:
+
+```shell
+make checks
+```
+
+## Running Tests
+
+### Integration Tests
+
+Running the integration tests requires:
+
+* A running RabbitMQ node with all defaults:
+ [https://www.rabbitmq.com/download.html](https://www.rabbitmq.com/download.html)
+* That the server is either reachable via `amqp://guest:guest@127.0.0.1:5672/`
+  or that the environment variable `AMQP_URL` is set to its URL
+  (e.g.: `export AMQP_URL="amqp://guest:verysecretpasswd@rabbitmq-host:5772/"`)
+
+The integration tests can be run via:
+
+```shell
+make tests
+```
+
+Some tests require access to the `rabbitmqctl` CLI. Use the environment variable
+`RABBITMQ_RABBITMQCTL_PATH=/some/path/to/rabbitmqctl` to run those tests.
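+
+For example, to point the test suite at a non-default node and a specific
+`rabbitmqctl` binary (both values below are placeholders):
+
+```shell
+export AMQP_URL="amqp://guest:guest@rabbitmq-host:5672/"
+RABBITMQ_RABBITMQCTL_PATH=/usr/local/sbin/rabbitmqctl make tests
+```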
+
+If you have Docker available in your machine, you can run:
+
+```shell
+make tests-docker
+```
+
+This target will start a RabbitMQ container, run the test suite with the
+environment variable set, and stop the RabbitMQ container after a successful run.
+
+All integration tests should use the `integrationConnection(...)` test
+helpers defined in `integration_test.go` to set up the integration environment
+and logging.
diff --git a/vendor/github.com/rabbitmq/amqp091-go/LICENSE b/vendor/github.com/rabbitmq/amqp091-go/LICENSE
new file mode 100644
index 0000000000..72fa55ebcb
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/LICENSE
@@ -0,0 +1,25 @@
+AMQP 0-9-1 Go Client
+Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+
+Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/rabbitmq/amqp091-go/Makefile b/vendor/github.com/rabbitmq/amqp091-go/Makefile
new file mode 100644
index 0000000000..7dc71bc5ff
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/Makefile
@@ -0,0 +1,57 @@
+.DEFAULT_GOAL := list
+
+# Insert a comment starting with '##' after a target, and it will be printed by 'make' and 'make list'
+.PHONY: list
+list: ## list Makefile targets
+ @echo "The most used targets: \n"
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
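+#
+# For example (illustrative), a target declared as
+#   foo: ## Do the foo thing
+# is printed by 'make list' as "foo" followed by "Do the foo thing".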
+
+.PHONY: check-fmt
+check-fmt: ## Ensure code is formatted
+ gofmt -l -d . # For the sake of debugging
+ test -z "$$(gofmt -l .)"
+
+.PHONY: fmt
+fmt: ## Run go fmt against code
+ go fmt ./...
+
+.PHONY: tests
+tests: ## Run all tests; requires a running rabbitmq-server. Use GO_TEST_FLAGS to add extra flags to go test
+ go test -race -v -tags integration $(GO_TEST_FLAGS)
+
+.PHONY: tests-docker
+tests-docker: rabbitmq-server
+ RABBITMQ_RABBITMQCTL_PATH="DOCKER:$(CONTAINER_NAME)" go test -race -v -tags integration $(GO_TEST_FLAGS)
+ $(MAKE) stop-rabbitmq-server
+
+.PHONY: check
+check:
+ golangci-lint run ./...
+
+CONTAINER_NAME ?= amqp091-go-rabbitmq
+
+.PHONY: rabbitmq-server
+rabbitmq-server: ## Start a RabbitMQ server using Docker. Container name can be customised with CONTAINER_NAME=some-rabbit
+ docker run --detach --rm --name $(CONTAINER_NAME) \
+ --publish 5672:5672 --publish 15672:15672 \
+ --pull always rabbitmq:3-management
+
+.PHONY: stop-rabbitmq-server
+stop-rabbitmq-server: ## Stop a RabbitMQ server using Docker. Container name can be customised with CONTAINER_NAME=some-rabbit
+ docker stop $(CONTAINER_NAME)
+
+certs:
+ ./certs.sh
+
+.PHONY: certs-rm
+certs-rm:
+ rm -r ./certs/
+
+.PHONY: rabbitmq-server-tls
+rabbitmq-server-tls: | certs ## Start a RabbitMQ server using Docker. Container name can be customised with CONTAINER_NAME=some-rabbit
+ docker run --detach --rm --name $(CONTAINER_NAME) \
+ --publish 5672:5672 --publish 5671:5671 --publish 15672:15672 \
+ --mount type=bind,src=./certs/server,dst=/certs \
+ --mount type=bind,src=./certs/ca/cacert.pem,dst=/certs/cacert.pem,readonly \
+ --mount type=bind,src=./rabbitmq-confs/tls/90-tls.conf,dst=/etc/rabbitmq/conf.d/90-tls.conf \
+ --pull always rabbitmq:3-management
diff --git a/vendor/github.com/rabbitmq/amqp091-go/README.md b/vendor/github.com/rabbitmq/amqp091-go/README.md
new file mode 100644
index 0000000000..6d3143f67a
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/README.md
@@ -0,0 +1,105 @@
+# Go RabbitMQ Client Library
+
+[![amqp091-go](https://github.com/rabbitmq/amqp091-go/actions/workflows/tests.yml/badge.svg)](https://github.com/rabbitmq/amqp091-go/actions/workflows/tests.yml)
+[![Go Reference](https://pkg.go.dev/badge/github.com/rabbitmq/amqp091-go.svg)](https://pkg.go.dev/github.com/rabbitmq/amqp091-go)
+[![Go Report Card](https://goreportcard.com/badge/github.com/rabbitmq/amqp091-go)](https://goreportcard.com/report/github.com/rabbitmq/amqp091-go)
+
+This is a Go AMQP 0.9.1 client maintained by the [RabbitMQ core team](https://github.com/rabbitmq).
+It was [originally developed by Sean Treadway](https://github.com/streadway/amqp).
+
+## Differences from streadway/amqp
+
+Some things are different compared to the original client; others haven't changed.
+
+### Package Name
+
+This library uses a different package name. If moving from `streadway/amqp`,
+using an alias may reduce the number of changes needed:
+
+``` go
+amqp "github.com/rabbitmq/amqp091-go"
+```
+
+### License
+
+This client uses the same 2-clause BSD license as the original project.
+
+### Public API Evolution
+
+This client retains key API elements as far as practically possible.
+It is, however, open to reasonable breaking public API changes suggested by the community.
+We don't have a "no breaking public API changes ever" rule and fully recognize
+that a good client API evolves over time.
+
+
+## Project Maturity
+
+This project is based on a mature Go client that's been around for over a decade.
+
+
+## Supported Go Versions
+
+This client supports the two most recent Go release series.
+
+
+## Supported RabbitMQ Versions
+
+This project supports RabbitMQ versions starting with `2.0` but is primarily tested
+against [currently supported RabbitMQ release series](https://www.rabbitmq.com/versions.html).
+
+Some features and behaviours may be server version-specific.
+
+## Goals
+
+Provide a functional interface that closely represents the AMQP 0.9.1 model
+targeted to RabbitMQ as a server. This includes the minimum necessary to
+interact with the semantics of the protocol.
+
+## Non-goals
+
+Things not intended to be supported:
+
+ * Auto reconnect and re-synchronization of client and server topologies.
+ * Reconnection would require understanding the error paths when the
+ topology cannot be declared on reconnect. This would require a new set
+ of types and code paths that are best suited at the call-site of this
+ package. AMQP has a dynamic topology that needs all peers to agree. If
+ this doesn't happen, the behavior is undefined. Instead of producing a
+ possible interface with undefined behavior, this package is designed to
+ be simple for the caller to implement the necessary connection-time
+ topology declaration so that reconnection is trivial and encapsulated in
+ the caller's application code.
+ * AMQP Protocol negotiation for forward or backward compatibility.
+ * 0.9.1 is stable and widely deployed. AMQP 1.0 is a divergent
+ specification (a different protocol) and belongs to a different library.
+ * Anything other than PLAIN and EXTERNAL authentication mechanisms.
+ * Keeping the mechanisms interface modular makes it possible to extend
+ outside of this package. If other mechanisms prove to be popular, then
+ we would accept patches to include them in this package.
+ * Support for [`basic.return` and `basic.ack` frame ordering](https://www.rabbitmq.com/confirms.html#when-publishes-are-confirmed).
+ This client uses Go channels for certain protocol events and ordering between
+ events sent to two different channels generally cannot be guaranteed.
+
+## Usage
+
+See the [_examples](_examples) subdirectory for simple producer and consumer executables.
+If you have a use-case in mind which isn't well-represented by the examples,
+please file an issue.
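+
+A minimal publish sketch (illustrative; assumes a local RabbitMQ node with
+default credentials, uses the `amqp` alias shown above, and elides imports
+and error handling):
+
+``` go
+conn, _ := amqp.Dial("amqp://guest:guest@localhost:5672/")
+defer conn.Close()
+
+ch, _ := conn.Channel()
+defer ch.Close()
+
+q, _ := ch.QueueDeclare("hello", false, false, false, false, nil)
+
+_ = ch.PublishWithContext(context.Background(),
+	"",     // exchange: default
+	q.Name, // routing key: the queue name
+	false,  // mandatory
+	false,  // immediate
+	amqp.Publishing{ContentType: "text/plain", Body: []byte("Hello")})
+```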
+
+## Documentation
+
+ * [Godoc API reference](http://godoc.org/github.com/rabbitmq/amqp091-go)
+ * [RabbitMQ tutorials in Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go)
+
+## Contributing
+
+Pull requests are very much welcomed. Create your pull request on a non-main
+branch, make sure a test or example is included that covers your change, and
+ensure your commits represent coherent changes that include a reason for the change.
+
+See [CONTRIBUTING.md](CONTRIBUTING.md) for more information.
+
+## License
+
+BSD 2 clause, see LICENSE for more details.
diff --git a/vendor/github.com/rabbitmq/amqp091-go/RELEASE.md b/vendor/github.com/rabbitmq/amqp091-go/RELEASE.md
new file mode 100644
index 0000000000..1378d68866
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/RELEASE.md
@@ -0,0 +1,16 @@
+# Guide to release a new version
+
+1. Update the `buildVersion` constant in [connection.go](https://github.com/rabbitmq/amqp091-go/blob/4886c35d10b273bd374e3ed2356144ad41d27940/connection.go#L31)
+2. Commit and push. Include the version in the commit message e.g. [this commit](https://github.com/rabbitmq/amqp091-go/commit/52ce2efd03c53dcf77d5496977da46840e9abd24)
+3. Create a new [GitHub Release](https://github.com/rabbitmq/amqp091-go/releases). Create a new tag as `v<major>.<minor>.<patch>`
+   1. Use the auto-generated release notes feature in GitHub
+4. Generate the change log, see [Changelog Generation](#changelog-generation)
+5. Review the changelog. Watch out for issues closed as "not-fixed" or without a PR
+6. Commit and push. Pro tip: include `[skip ci]` in the commit message to skip the CI run, since it's only documentation
+7. Send an announcement to the mailing list. Take inspiration from [this message](https://groups.google.com/g/rabbitmq-users/c/EBGYGOWiSgs/m/0sSFuAGICwAJ)
+
+## Changelog Generation
+
+```
+github_changelog_generator --token GITHUB-TOKEN -u rabbitmq -p amqp091-go --no-unreleased --release-branch main
+```
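+
+Step 1 can also be automated with the `change_version.sh` script in the
+repository root; a sketch, using a placeholder version number:
+
+```
+./change_version.sh 1.6.0
+```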
diff --git a/vendor/github.com/rabbitmq/amqp091-go/allocator.go b/vendor/github.com/rabbitmq/amqp091-go/allocator.go
new file mode 100644
index 0000000000..f2925e742b
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/allocator.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amqp091
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+)
+
+const (
+ free = 0
+ allocated = 1
+)
+
+// allocator maintains a bitset of allocated numbers.
+type allocator struct {
+ pool *big.Int
+ follow int
+ low int
+ high int
+}
+
+// newAllocator returns an allocator that reserves and frees integers out of a
+// range between low and high.
+//
+// O(N) worst case space used, where N is maximum allocated, divided by
+// sizeof(big.Word)
+func newAllocator(low, high int) *allocator {
+ return &allocator{
+ pool: big.NewInt(0),
+ follow: low,
+ low: low,
+ high: high,
+ }
+}
+
+// String returns a string describing the contents of the allocator like
+// "allocator[low..high] reserved..until"
+//
+// O(N) where N is high-low
+func (a allocator) String() string {
+ b := &bytes.Buffer{}
+ fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high)
+
+ for low := a.low; low <= a.high; low++ {
+ high := low
+ for a.reserved(high) && high <= a.high {
+ high++
+ }
+
+ if high > low+1 {
+ fmt.Fprintf(b, " %d..%d", low, high-1)
+ } else if high > low {
+ fmt.Fprintf(b, " %d", high-1)
+ }
+
+ low = high
+ }
+ return b.String()
+}
+
+// next reserves and returns the next available number out of the range between
+// low and high. If no number is available, false is returned.
+//
+// O(N) worst case runtime where N is allocated, but usually O(1) due to a
+// rolling index into the oldest allocation.
+func (a *allocator) next() (int, bool) {
+ wrapped := a.follow
+ defer func() {
+ // make a.follow point to next value
+ if a.follow == a.high {
+ a.follow = a.low
+ } else {
+ a.follow += 1
+ }
+ }()
+
+ // Find trailing bit
+ for ; a.follow <= a.high; a.follow++ {
+ if a.reserve(a.follow) {
+ return a.follow, true
+ }
+ }
+
+ // Find a previously freed number below the wrap point
+ a.follow = a.low
+
+ for ; a.follow < wrapped; a.follow++ {
+ if a.reserve(a.follow) {
+ return a.follow, true
+ }
+ }
+
+ return 0, false
+}
+
+// reserve claims the bit if it is not already claimed, returning true if
+// successfully claimed.
+func (a *allocator) reserve(n int) bool {
+ if a.reserved(n) {
+ return false
+ }
+ a.pool.SetBit(a.pool, n-a.low, allocated)
+ return true
+}
+
+// reserved returns true if the integer has been allocated
+func (a *allocator) reserved(n int) bool {
+ return a.pool.Bit(n-a.low) == allocated
+}
+
+// release frees the use of the number for another allocation
+func (a *allocator) release(n int) {
+ a.pool.SetBit(a.pool, n-a.low, free)
+}
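+
+// Usage sketch (illustrative, not part of the package):
+//
+//	a := newAllocator(1, 10)
+//	if n, ok := a.next(); ok {
+//		// ... use n, e.g. as a channel id ...
+//		a.release(n)
+//	}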
diff --git a/vendor/github.com/rabbitmq/amqp091-go/auth.go b/vendor/github.com/rabbitmq/amqp091-go/auth.go
new file mode 100644
index 0000000000..0c07bb3ece
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/auth.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amqp091
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Authentication interface provides a means for different SASL authentication
+// mechanisms to be used during connection tuning.
+type Authentication interface {
+ Mechanism() string
+ Response() string
+}
+
+// PlainAuth is similar to Basic Auth in HTTP.
+type PlainAuth struct {
+ Username string
+ Password string
+}
+
+// Mechanism returns "PLAIN"
+func (auth *PlainAuth) Mechanism() string {
+ return "PLAIN"
+}
+
+// Response returns the null character delimited encoding for the SASL PLAIN Mechanism.
+func (auth *PlainAuth) Response() string {
+ return fmt.Sprintf("\000%s\000%s", auth.Username, auth.Password)
+}
+
+// AMQPlainAuth is similar to PlainAuth, but the credentials are encoded as an AMQP field table.
+type AMQPlainAuth struct {
+ Username string
+ Password string
+}
+
+// Mechanism returns "AMQPLAIN"
+func (auth *AMQPlainAuth) Mechanism() string {
+ return "AMQPLAIN"
+}
+
+// Response returns an AMQP encoded credentials table, without the field table size.
+func (auth *AMQPlainAuth) Response() string {
+ var buf bytes.Buffer
+ table := Table{"LOGIN": auth.Username, "PASSWORD": auth.Password}
+ if err := writeTable(&buf, table); err != nil {
+ return ""
+ }
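+ // writeTable prefixes the encoded table with its byte length as a 4-byte
+ // uint32; AMQPLAIN expects the bare table, hence the [4:] below.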
+ return buf.String()[4:]
+}
+
+// ExternalAuth implements the EXTERNAL SASL mechanism, as used by rabbitmq-auth-mechanism-ssl.
+type ExternalAuth struct {
+}
+
+// Mechanism returns "EXTERNAL"
+func (*ExternalAuth) Mechanism() string {
+ return "EXTERNAL"
+}
+
+// Response returns the null character delimited response used for the EXTERNAL mechanism.
+func (*ExternalAuth) Response() string {
+ return "\000*\000*"
+}
+
+// pickSASLMechanism finds the first mechanism preferred by the client that the server supports.
+func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) {
+
+ for _, auth = range client {
+ for _, mech := range serverMechanisms {
+ if auth.Mechanism() == mech {
+ return auth, true
+ }
+ }
+ }
+
+ return
+}
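+
+// For example (illustrative): with client preferences
+// []Authentication{&PlainAuth{...}, &ExternalAuth{}} and server mechanisms
+// []string{"EXTERNAL", "PLAIN"}, pickSASLMechanism returns the PlainAuth
+// entry, because the client's preference order takes priority.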
diff --git a/vendor/github.com/streadway/amqp/certs.sh b/vendor/github.com/rabbitmq/amqp091-go/certs.sh
similarity index 93%
rename from vendor/github.com/streadway/amqp/certs.sh
rename to vendor/github.com/rabbitmq/amqp091-go/certs.sh
index 834f422427..0bbb1c6c0e 100644
--- a/vendor/github.com/streadway/amqp/certs.sh
+++ b/vendor/github.com/rabbitmq/amqp091-go/certs.sh
@@ -38,7 +38,7 @@ serial = $dir/serial
default_crl_days = 7
default_days = 3650
-default_md = sha1
+default_md = sha256
policy = testca_policy
x509_extensions = certificate_extensions
@@ -57,7 +57,7 @@ basicConstraints = CA:false
[ req ]
default_bits = 2048
default_keyfile = ./private/cakey.pem
-default_md = sha1
+default_md = sha256
prompt = yes
distinguished_name = root_ca_distinguished_name
x509_extensions = root_ca_extensions
@@ -71,12 +71,12 @@ keyUsage = keyCertSign, cRLSign
[ client_ca_extensions ]
basicConstraints = CA:false
-keyUsage = digitalSignature
+keyUsage = keyEncipherment,digitalSignature
extendedKeyUsage = 1.3.6.1.5.5.7.3.2
[ server_ca_extensions ]
basicConstraints = CA:false
-keyUsage = keyEncipherment
+keyUsage = keyEncipherment,digitalSignature
extendedKeyUsage = 1.3.6.1.5.5.7.3.1
subjectAltName = @alt_names
@@ -106,7 +106,7 @@ openssl req \
-new \
-nodes \
-config openssl.cnf \
- -subj "/CN=127.0.0.1/O=server/" \
+ -subj "/CN=localhost/O=server/" \
-key $root/server/key.pem \
-out $root/server/req.pem \
-outform PEM
@@ -115,7 +115,7 @@ openssl req \
-new \
-nodes \
-config openssl.cnf \
- -subj "/CN=127.0.0.1/O=client/" \
+ -subj "/CN=localhost/O=client/" \
-key $root/client/key.pem \
-out $root/client/req.pem \
-outform PEM
diff --git a/vendor/github.com/rabbitmq/amqp091-go/change_version.sh b/vendor/github.com/rabbitmq/amqp091-go/change_version.sh
new file mode 100644
index 0000000000..ff8e3694c6
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/change_version.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+echo $1 > VERSION
+sed -i -e "s/.*buildVersion = \"*.*/buildVersion = \"$1\"/" ./connection.go
+go fmt ./...
diff --git a/vendor/github.com/streadway/amqp/channel.go b/vendor/github.com/rabbitmq/amqp091-go/channel.go
similarity index 77%
rename from vendor/github.com/streadway/amqp/channel.go
rename to vendor/github.com/rabbitmq/amqp091-go/channel.go
index cd19ce7ee0..0dcec90255 100644
--- a/vendor/github.com/streadway/amqp/channel.go
+++ b/vendor/github.com/rabbitmq/amqp091-go/channel.go
@@ -1,11 +1,13 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-package amqp
+package amqp091
import (
+ "context"
+ "errors"
"reflect"
"sync"
"sync/atomic"
@@ -15,14 +17,14 @@ import (
// +------+---------+-------------+ +------------+ +-----------+
// | type | channel | size | | payload | | frame-end |
// +------+---------+-------------+ +------------+ +-----------+
-// octet short long size octets octet
+//
+// octet short long size octets octet
const frameHeaderSize = 1 + 2 + 4 + 1
/*
Channel represents an AMQP channel. Used as a context for valid message
exchange. Errors on methods with this Channel as a receiver means this channel
should be discarded and a new channel established.
-
*/
type Channel struct {
destructor sync.Once
@@ -39,6 +41,7 @@ type Channel struct {
// closed is set to 1 when the channel has been closed - see Channel.send()
closed int32
+ close chan struct{}
// true when we will never notify again
noNotify bool
@@ -66,7 +69,7 @@ type Channel struct {
errors chan *Error
// State machine that manages frame order, must only be mutated by the connection
- recv func(*Channel, frame) error
+ recv func(*Channel, frame)
// Current state for frame re-assembly, only mutated from recv
message messageWithContent
@@ -84,12 +87,20 @@ func newChannel(c *Connection, id uint16) *Channel {
confirms: newConfirms(),
recv: (*Channel).recvMethod,
errors: make(chan *Error, 1),
+ close: make(chan struct{}),
}
}
+// Signal that from now on, Channel.send() should call Channel.sendClosed()
+func (ch *Channel) setClosed() {
+ atomic.StoreInt32(&ch.closed, 1)
+}
+
// shutdown is called by Connection after the channel has been removed from the
// connection registry.
func (ch *Channel) shutdown(e *Error) {
+ ch.setClosed()
+
ch.destructor.Do(func() {
ch.m.Lock()
defer ch.m.Unlock()
@@ -103,14 +114,7 @@ func (ch *Channel) shutdown(e *Error) {
for _, c := range ch.closes {
c <- e
}
- }
-
- // Signal that from now on, Channel.send() should call
- // Channel.sendClosed()
- atomic.StoreInt32(&ch.closed, 1)
-
- // Notify RPC if we're selecting
- if e != nil {
+ // Notify RPC if we're selecting
ch.errors <- e
}
@@ -144,6 +148,7 @@ func (ch *Channel) shutdown(e *Error) {
}
close(ch.errors)
+ close(ch.close)
ch.noNotify = true
})
}
@@ -154,7 +159,7 @@ func (ch *Channel) shutdown(e *Error) {
// only 'channel.close' is sent to the server.
func (ch *Channel) send(msg message) (err error) {
// If the channel is closed, use Channel.sendClosed()
- if atomic.LoadInt32(&ch.closed) == 1 {
+ if ch.IsClosed() {
return ch.sendClosed(msg)
}
@@ -230,14 +235,38 @@ func (ch *Channel) sendOpen(msg message) (err error) {
size = len(body)
}
- if err = ch.connection.send(&methodFrame{
+ // If the channel is closed, use Channel.sendClosed()
+ if ch.IsClosed() {
+ return ch.sendClosed(msg)
+ }
+
+ // Flush the buffer only after all the Frames that comprise the Message
+ // have been written to maximise benefits of using a buffered writer.
+ defer func() {
+ if endError := ch.connection.endSendUnflushed(); endError != nil {
+ if err == nil {
+ err = endError
+ }
+ }
+ }()
+
+ // We use sendUnflushed() in this method as sending the message requires
+ // sending multiple Frames (methodFrame, headerFrame, N x bodyFrame).
+ // Flushing after each Frame is inefficient, as it negates much of the
+ // benefit of using a buffered writer and results in more syscalls than
+ // necessary. Flushing buffers after every frame can have a significant
+ // performance impact when sending (e.g. basicPublish) small messages,
+ // so sendUnflushed() performs an *Unflushed* write, but is otherwise
+ // equivalent to the send() method. We later use the separate flush
+ // method to explicitly flush the buffer after all Frames are written.
+ if err = ch.connection.sendUnflushed(&methodFrame{
ChannelId: ch.id,
Method: content,
}); err != nil {
return
}
- if err = ch.connection.send(&headerFrame{
+ if err = ch.connection.sendUnflushed(&headerFrame{
ChannelId: ch.id,
ClassId: class,
Size: uint64(len(body)),
@@ -252,7 +281,7 @@ func (ch *Channel) sendOpen(msg message) (err error) {
j = len(body)
}
- if err = ch.connection.send(&bodyFrame{
+ if err = ch.connection.sendUnflushed(&bodyFrame{
ChannelId: ch.id,
Body: body[i:j],
}); err != nil {
@@ -260,6 +289,11 @@ func (ch *Channel) sendOpen(msg message) (err error) {
}
}
} else {
+ // If the channel is closed, use Channel.sendClosed()
+ if ch.IsClosed() {
+ return ch.sendClosed(msg)
+ }
+
err = ch.connection.send(&methodFrame{
ChannelId: ch.id,
Method: msg,
@@ -274,11 +308,16 @@ func (ch *Channel) sendOpen(msg message) (err error) {
func (ch *Channel) dispatch(msg message) {
switch m := msg.(type) {
case *channelClose:
+ // Note: channel state is set to closed immediately after the message is
+ // decoded by the Connection
+
// lock before sending connection.close-ok
// to avoid unexpected interleaving with basic.publish frames if
// publishing is happening concurrently
ch.m.Lock()
- ch.send(&channelCloseOk{})
+ if err := ch.send(&channelCloseOk{}); err != nil {
+ Logger.Printf("error sending channelCloseOk, channel id: %d error: %+v", ch.id, err)
+ }
ch.m.Unlock()
ch.connection.closeChannel(ch, newError(m.ReplyCode, m.ReplyText))
@@ -288,7 +327,9 @@ func (ch *Channel) dispatch(msg message) {
c <- m.Active
}
ch.notifyM.RUnlock()
- ch.send(&channelFlowOk{Active: m.Active})
+ if err := ch.send(&channelFlowOk{Active: m.Active}); err != nil {
+ Logger.Printf("error sending channelFlowOk, channel id: %d error: %+v", ch.id, err)
+ }
case *basicCancel:
ch.notifyM.RLock()
@@ -330,44 +371,49 @@ func (ch *Channel) dispatch(msg message) {
// deliveries are in flight and a no-wait cancel has happened
default:
- ch.rpc <- msg
+ select {
+ case <-ch.close:
+ return
+ case ch.rpc <- msg:
+ }
}
}
-func (ch *Channel) transition(f func(*Channel, frame) error) error {
+func (ch *Channel) transition(f func(*Channel, frame)) {
ch.recv = f
- return nil
}
-func (ch *Channel) recvMethod(f frame) error {
+func (ch *Channel) recvMethod(f frame) {
switch frame := f.(type) {
case *methodFrame:
if msg, ok := frame.Method.(messageWithContent); ok {
ch.body = make([]byte, 0)
ch.message = msg
- return ch.transition((*Channel).recvHeader)
+ ch.transition((*Channel).recvHeader)
+ return
}
ch.dispatch(frame.Method) // termination state
- return ch.transition((*Channel).recvMethod)
+ ch.transition((*Channel).recvMethod)
case *headerFrame:
// drop
- return ch.transition((*Channel).recvMethod)
+ ch.transition((*Channel).recvMethod)
case *bodyFrame:
// drop
- return ch.transition((*Channel).recvMethod)
- }
+ ch.transition((*Channel).recvMethod)
- panic("unexpected frame type")
+ default:
+ panic("unexpected frame type")
+ }
}
-func (ch *Channel) recvHeader(f frame) error {
+func (ch *Channel) recvHeader(f frame) {
switch frame := f.(type) {
case *methodFrame:
// interrupt content and handle method
- return ch.recvMethod(f)
+ ch.recvMethod(f)
case *headerFrame:
// start collecting if we expect body frames
@@ -376,29 +422,31 @@ func (ch *Channel) recvHeader(f frame) error {
if frame.Size == 0 {
ch.message.setContent(ch.header.Properties, ch.body)
ch.dispatch(ch.message) // termination state
- return ch.transition((*Channel).recvMethod)
+ ch.transition((*Channel).recvMethod)
+ return
}
- return ch.transition((*Channel).recvContent)
+ ch.transition((*Channel).recvContent)
case *bodyFrame:
// drop and reset
- return ch.transition((*Channel).recvMethod)
- }
+ ch.transition((*Channel).recvMethod)
- panic("unexpected frame type")
+ default:
+ panic("unexpected frame type")
+ }
}
// state after method + header and before the length
// defined by the header has been reached
-func (ch *Channel) recvContent(f frame) error {
+func (ch *Channel) recvContent(f frame) {
switch frame := f.(type) {
case *methodFrame:
// interrupt content and handle method
- return ch.recvMethod(f)
+ ch.recvMethod(f)
case *headerFrame:
// drop and reset
- return ch.transition((*Channel).recvMethod)
+ ch.transition((*Channel).recvMethod)
case *bodyFrame:
if cap(ch.body) == 0 {
@@ -409,13 +457,15 @@ func (ch *Channel) recvContent(f frame) error {
if uint64(len(ch.body)) >= ch.header.Size {
ch.message.setContent(ch.header.Properties, ch.body)
ch.dispatch(ch.message) // termination state
- return ch.transition((*Channel).recvMethod)
+ ch.transition((*Channel).recvMethod)
+ return
}
- return ch.transition((*Channel).recvContent)
- }
+ ch.transition((*Channel).recvContent)
- panic("unexpected frame type")
+ default:
+ panic("unexpected frame type")
+ }
}
/*
@@ -423,9 +473,12 @@ Close initiate a clean channel closure by sending a close message with the error
code set to '200'.
It is safe to call this method multiple times.
-
*/
func (ch *Channel) Close() error {
+ if ch.IsClosed() {
+ return nil
+ }
+
defer ch.connection.closeChannel(ch, nil)
return ch.call(
&channelClose{ReplyCode: replySuccess},
@@ -433,6 +486,12 @@ func (ch *Channel) Close() error {
)
}
+// IsClosed returns true if the channel is marked as closed, otherwise false
+// is returned.
+func (ch *Channel) IsClosed() bool {
+ return atomic.LoadInt32(&ch.closed) == 1
+}
+
/*
NotifyClose registers a listener for when the server sends a channel or
connection exception in the form of a Connection.Close or Channel.Close method.
@@ -443,6 +502,8 @@ this channel.
The chan provided will be closed when the Channel is closed and on a
graceful close, no error will be sent.
+In case of a non-graceful close the error will be notified synchronously by the library,
+so the caller must consume from the channel to avoid deadlocks.
*/
func (ch *Channel) NotifyClose(c chan *Error) chan *Error {
ch.notifyM.Lock()
@@ -488,7 +549,6 @@ much on the same connection, all channels using that connection will suffer,
including acknowledgments from deliveries. Use different Connections if you
desire to interleave consumers and producers in the same process to avoid your
basic.ack messages from getting rate limited with your basic.publish messages.
-
*/
func (ch *Channel) NotifyFlow(c chan bool) chan bool {
ch.notifyM.Lock()
@@ -510,7 +570,6 @@ immediate flags.
A return struct has a copy of the Publishing along with some error
information about why the publishing failed.
-
*/
func (ch *Channel) NotifyReturn(c chan Return) chan Return {
ch.notifyM.Lock()
@@ -531,7 +590,6 @@ from the server when a queue is deleted or when consuming from a mirrored queue
where the master has just failed (and was moved to another node).
The subscription tag is returned to the listener.
-
*/
func (ch *Channel) NotifyCancel(c chan string) chan string {
ch.notifyM.Lock()
@@ -594,6 +652,8 @@ or Channel while confirms are in-flight.
It's advisable to wait for all Confirmations to arrive before calling
Channel.Close() or Connection.Close().
+It is also advisable for the caller to consume from the returned channel until it is
+closed to avoid possible deadlocks.
*/
func (ch *Channel) NotifyPublish(confirm chan Confirmation) chan Confirmation {
ch.notifyM.Lock()
@@ -606,7 +666,6 @@ func (ch *Channel) NotifyPublish(confirm chan Confirmation) chan Confirmation {
}
return confirm
-
}
/*
@@ -672,7 +731,6 @@ When noWait is true, do not wait for the server to acknowledge the cancel.
Only use this when you are certain there are no deliveries in flight that
require an acknowledgment, otherwise they will arrive and be dropped in the
client without an ack, and will not be redelivered to other consumers.
-
*/
func (ch *Channel) Cancel(consumer string, noWait bool) error {
req := &basicCancel{
@@ -705,12 +763,12 @@ the type "direct" with the routing key matching the queue's name. With this
default binding, it is possible to publish messages that route directly to
this queue by publishing to "" with the routing key of the queue name.
- QueueDeclare("alerts", true, false, false, false, nil)
- Publish("", "alerts", false, false, Publishing{Body: []byte("...")})
+ QueueDeclare("alerts", true, false, false, false, nil)
+ Publish("", "alerts", false, false, Publishing{Body: []byte("...")})
- Delivery Exchange Key Queue
- -----------------------------------------------
- key: alerts -> "" -> alerts -> alerts
+ Delivery Exchange Key Queue
+ -----------------------------------------------
+ key: alerts -> "" -> alerts -> alerts
The queue name may be empty, in which case the server will generate a unique name
which will be returned in the Name field of Queue struct.
@@ -746,7 +804,6 @@ or attempting to modify an existing queue from a different connection.
When the error return value is not nil, you can assume the queue could not be
declared with these parameters, and the channel will be closed.
-
*/
func (ch *Channel) QueueDeclare(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) {
if err := args.Validate(); err != nil {
@@ -780,13 +837,11 @@ func (ch *Channel) QueueDeclare(name string, durable, autoDelete, exclusive, noW
}
/*
-
QueueDeclarePassive is functionally and parametrically equivalent to
QueueDeclare, except that it sets the "passive" attribute to true. A passive
queue is assumed by RabbitMQ to already exist, and attempting to connect to a
non-existent queue will cause RabbitMQ to throw an exception. This function
can be used to test for the existence of a queue.
-
*/
func (ch *Channel) QueueDeclarePassive(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) {
if err := args.Validate(); err != nil {
@@ -833,6 +888,7 @@ declared with specific parameters.
If a queue by this name does not exist, an error will be returned and the
channel will be closed.
+Deprecated: Use QueueDeclarePassive instead.
*/
func (ch *Channel) QueueInspect(name string) (Queue, error) {
req := &queueDeclare{
@@ -857,14 +913,14 @@ QueueBind binds an exchange to a queue so that publishings to the exchange will
be routed to the queue when the publishing routing key matches the binding
routing key.
- QueueBind("pagers", "alert", "log", false, nil)
- QueueBind("emails", "info", "log", false, nil)
+ QueueBind("pagers", "alert", "log", false, nil)
+ QueueBind("emails", "info", "log", false, nil)
- Delivery Exchange Key Queue
- -----------------------------------------------
- key: alert --> log ----> alert --> pagers
- key: info ---> log ----> info ---> emails
- key: debug --> log (none) (dropped)
+ Delivery Exchange Key Queue
+ -----------------------------------------------
+ key: alert --> log ----> alert --> pagers
+ key: info ---> log ----> info ---> emails
+ key: debug --> log (none) (dropped)
If a binding with the same key and arguments already exists between the
exchange and queue, the attempt to rebind will be ignored and the existing
@@ -874,16 +930,16 @@ In the case that multiple bindings may cause the message to be routed to the
same queue, the server will only route the publishing once. This is possible
with topic exchanges.
- QueueBind("pagers", "alert", "amq.topic", false, nil)
- QueueBind("emails", "info", "amq.topic", false, nil)
- QueueBind("emails", "#", "amq.topic", false, nil) // match everything
+ QueueBind("pagers", "alert", "amq.topic", false, nil)
+ QueueBind("emails", "info", "amq.topic", false, nil)
+ QueueBind("emails", "#", "amq.topic", false, nil) // match everything
- Delivery Exchange Key Queue
- -----------------------------------------------
- key: alert --> amq.topic ----> alert --> pagers
- key: info ---> amq.topic ----> # ------> emails
- \---> info ---/
- key: debug --> amq.topic ----> # ------> emails
+ Delivery Exchange Key Queue
+ -----------------------------------------------
+ key: alert --> amq.topic ----> alert --> pagers
+ key: info ---> amq.topic ----> # ------> emails
+ \---> info ---/
+ key: debug --> amq.topic ----> # ------> emails
It is only possible to bind a durable queue to a durable exchange regardless of
whether the queue or exchange is auto-deleted. Bindings between durable queues
@@ -894,7 +950,6 @@ will be closed.
When noWait is false and the queue could not be bound, the channel will be
closed with an error.
-
*/
func (ch *Channel) QueueBind(name, key, exchange string, noWait bool, args Table) error {
if err := args.Validate(); err != nil {
@@ -919,7 +974,6 @@ arguments.
 It is possible to send an empty string for the exchange name which means to
unbind the queue from the default exchange.
-
*/
func (ch *Channel) QueueUnbind(name, key, exchange string, args Table) error {
if err := args.Validate(); err != nil {
@@ -976,7 +1030,6 @@ When noWait is true, the queue will be deleted without waiting for a response
from the server. The purged message count will not be meaningful. If the queue
could not be deleted, a channel exception will be raised and the channel will
be closed.
-
*/
func (ch *Channel) QueueDelete(name string, ifUnused, ifEmpty, noWait bool) (int, error) {
req := &queueDelete{
@@ -1043,11 +1096,11 @@ Inflight messages, limited by Channel.Qos will be buffered until received from
the returned chan.
When the Channel or Connection is closed, all buffered and inflight messages will
-be dropped.
+be dropped. RabbitMQ will requeue messages that were not acknowledged. In other
+words, messages dropped in this way won't be lost.
When the consumer tag is cancelled, all inflight messages will be delivered until
the returned chan is closed.
-
*/
func (ch *Channel) Consume(queue, consumer string, autoAck, exclusive, noLocal, noWait bool, args Table) (<-chan Delivery, error) {
// When we return from ch.call, there may be a delivery already for the
@@ -1082,7 +1135,122 @@ func (ch *Channel) Consume(queue, consumer string, autoAck, exclusive, noLocal,
return nil, err
}
- return (<-chan Delivery)(deliveries), nil
+ return deliveries, nil
+}
+
+/*
+ConsumeWithContext immediately starts delivering queued messages.
+
+This function is similar to Channel.Consume, and accepts a context to control
+the consumer lifecycle. When the context passed to this function is canceled,
+the consumer associated with the deliveries channel is canceled and the
+deliveries channel is closed.
+
+An application is advised to keep receiving messages from the delivery channel
+until the channel is empty. This is especially important to avoid memory leaks
+from unconsumed messages left in the delivery channel.
+
+Begin receiving on the returned chan Delivery before any other operation on the
+Connection or Channel.
+
+Continues deliveries to the returned chan Delivery until Channel.Cancel,
+Connection.Close, Channel.Close, the context is canceled, or an AMQP exception
+occurs. Consumers must range over the chan to ensure all deliveries are
+received. Unreceived deliveries will block all methods on the same connection.
+
+All deliveries in AMQP must be acknowledged. It is expected of the consumer to
+call Delivery.Ack after it has successfully processed the delivery. If the
+consumer is cancelled or the channel or connection is closed any unacknowledged
+deliveries will be requeued at the end of the same queue.
+
+The consumer is identified by a string that is unique and scoped for all
+consumers on this channel. If you wish to eventually cancel the consumer, use
+the same non-empty identifier in Channel.Cancel. An empty string will cause
+the library to generate a unique identity. The consumer identity will be
+included in every Delivery in the ConsumerTag field.
+
+When autoAck (also known as noAck) is true, the server will acknowledge
+deliveries to this consumer prior to writing the delivery to the network. When
+autoAck is true, the consumer should not call Delivery.Ack. Automatically
+acknowledging deliveries means that some deliveries may get lost if the
+consumer is unable to process them after the server delivers them.
+See http://www.rabbitmq.com/confirms.html for more details.
+
+When exclusive is true, the server will ensure that this is the sole consumer
+from this queue. When exclusive is false, the server will fairly distribute
+deliveries across multiple consumers.
+
+The noLocal flag is not supported by RabbitMQ.
+
+It's advisable to use separate connections for Channel.Publish and
+Channel.Consume so as not to have TCP pushback on publishing affect the ability
+to consume messages; this parameter is here mostly for completeness.
+
+When noWait is true, do not wait for the server to confirm the request and
+immediately begin deliveries. If it is not possible to consume, a channel
+exception will be raised and the channel will be closed.
+
+Optional arguments can be provided that have specific semantics for the queue
+or server.
+
+Inflight messages, limited by Channel.Qos, will be buffered until received from
+the returned chan.
+
+When the Channel or Connection is closed, all buffered and inflight messages will
+be dropped. RabbitMQ will requeue messages that were not acknowledged. In other
+words, messages dropped in this way are not lost.
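+
+A minimal consume-loop sketch (the queue name "jobs" and the process function
+are illustrative, not part of this API):
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	deliveries, err := ch.ConsumeWithContext(ctx, "jobs", "", false, false, false, false, nil)
+	if err != nil {
+		// handle the error, e.g. by discarding the channel
+	}
+	for d := range deliveries {
+		process(d) // hypothetical handler
+		_ = d.Ack(false)
+	}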
+*/
+func (ch *Channel) ConsumeWithContext(ctx context.Context, queue, consumer string, autoAck, exclusive, noLocal, noWait bool, args Table) (<-chan Delivery, error) {
+ // When we return from ch.call, there may be a delivery already for the
+ // consumer that hasn't been added to the consumer hash yet. Because of
+ // this, we never rely on the server picking a consumer tag for us.
+
+ if err := args.Validate(); err != nil {
+ return nil, err
+ }
+
+ if consumer == "" {
+ consumer = uniqueConsumerTag()
+ }
+
+ req := &basicConsume{
+ Queue: queue,
+ ConsumerTag: consumer,
+ NoLocal: noLocal,
+ NoAck: autoAck,
+ Exclusive: exclusive,
+ NoWait: noWait,
+ Arguments: args,
+ }
+ res := &basicConsumeOk{}
+
+ select {
+ default:
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+
+ deliveries := make(chan Delivery)
+
+ ch.consumers.add(consumer, deliveries)
+
+ if err := ch.call(req, res); err != nil {
+ ch.consumers.cancel(consumer)
+ return nil, err
+ }
+
+ go func() {
+ select {
+ case <-ch.consumers.closed:
+ return
+ case <-ctx.Done():
+ if ch != nil {
+ _ = ch.Cancel(consumer, false)
+ }
+ }
+ }()
+
+ return deliveries, nil
}
/*
@@ -1126,7 +1294,7 @@ Note: RabbitMQ declares the default exchange types like 'amq.fanout' as
durable, so queues that bind to these pre-declared exchanges must also be
durable.
-Exchanges declared as `internal` do not accept accept publishings. Internal
+Exchanges declared as `internal` do not accept publishings. Internal
exchanges are useful when you wish to implement inter-exchange topologies
that should not be exposed to users of the broker.
@@ -1158,13 +1326,11 @@ func (ch *Channel) ExchangeDeclare(name, kind string, durable, autoDelete, inter
}
/*
-
ExchangeDeclarePassive is functionally and parametrically equivalent to
ExchangeDeclare, except that it sets the "passive" attribute to true. A passive
exchange is assumed by RabbitMQ to already exist, and attempting to connect to a
non-existent exchange will cause RabbitMQ to throw an exception. This function
can be used to detect the existence of an exchange.
-
*/
func (ch *Channel) ExchangeDeclarePassive(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error {
if err := args.Validate(); err != nil {
@@ -1227,14 +1393,14 @@ exchange even though multiple bindings will match.
Given a message delivered to the source exchange, the message will be forwarded
to the destination exchange when the routing key is matched.
- ExchangeBind("sell", "MSFT", "trade", false, nil)
- ExchangeBind("buy", "AAPL", "trade", false, nil)
+ ExchangeBind("sell", "MSFT", "trade", false, nil)
+ ExchangeBind("buy", "AAPL", "trade", false, nil)
- Delivery Source Key Destination
- example exchange exchange
- -----------------------------------------------
- key: AAPL --> trade ----> MSFT sell
- \---> AAPL --> buy
+ Delivery Source Key Destination
+ example exchange exchange
+ -----------------------------------------------
+ key: AAPL --> trade ----> MSFT sell
+ \---> AAPL --> buy
When noWait is true, do not wait for the server to confirm the binding. If any
error occurs the channel will be closed. Add a listener to NotifyClose to
@@ -1322,15 +1488,85 @@ confirmations start at 1. Exit when all publishings are confirmed.
When Publish does not return an error and the channel is in confirm mode, the
internal counter for DeliveryTags with the first confirmation starts at 1.
+Deprecated: Use PublishWithContext instead.
*/
func (ch *Channel) Publish(exchange, key string, mandatory, immediate bool, msg Publishing) error {
+ _, err := ch.PublishWithDeferredConfirmWithContext(context.Background(), exchange, key, mandatory, immediate, msg)
+ return err
+}
+
+/*
+PublishWithContext sends a Publishing from the client to an exchange on the server.
+
+When you want a single message to be delivered to a single queue, you can
+publish to the default exchange with the routingKey of the queue name. This is
+because every declared queue gets an implicit route to the default exchange.
+
+Since publishings are asynchronous, any undeliverable message will get returned
+by the server. Add a listener with Channel.NotifyReturn to handle any
+undeliverable message when calling publish with either the mandatory or
+immediate parameters as true.
+
+Publishings can be undeliverable when the mandatory flag is true and no queue is
+bound that matches the routing key, or when the immediate flag is true and no
+consumer on the matched queue is ready to accept the delivery.
+
+This can return an error when the channel, connection or socket is closed. The
+error or lack of an error does not indicate whether the server has received this
+publishing.
+
+It is possible for a publishing to not reach the broker if the underlying socket
+is shut down without pending publishing packets being flushed from the kernel
+buffers. The easy way of making it probable that all publishings reach the
+server is to always call Connection.Close before terminating your publishing
+application. The way to ensure that all publishings reach the server is to add
+a listener to Channel.NotifyPublish and put the channel in confirm mode with
+Channel.Confirm. Publishing delivery tags and their corresponding
+confirmations start at 1. Exit when all publishings are confirmed.
+
+When this method does not return an error and the channel is in confirm mode, the
+internal counter for DeliveryTags with the first confirmation starts at 1.
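+
+A minimal publishing sketch (the default exchange "" and routing key "jobs" are
+illustrative):
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	err := ch.PublishWithContext(ctx, "", "jobs", false, false, Publishing{
+		ContentType: "text/plain",
+		Body:        []byte("work item"),
+	})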
+*/
+func (ch *Channel) PublishWithContext(ctx context.Context, exchange, key string, mandatory, immediate bool, msg Publishing) error {
+ _, err := ch.PublishWithDeferredConfirmWithContext(ctx, exchange, key, mandatory, immediate, msg)
+ return err
+}
+
+/*
+PublishWithDeferredConfirm behaves identically to Publish but additionally returns a
+DeferredConfirmation, allowing the caller to wait on the publisher confirmation
+for this message. If the channel has not been put into confirm mode,
+the DeferredConfirmation will be nil.
+
+Deprecated: Use PublishWithDeferredConfirmWithContext instead.
+*/
+func (ch *Channel) PublishWithDeferredConfirm(exchange, key string, mandatory, immediate bool, msg Publishing) (*DeferredConfirmation, error) {
+ return ch.PublishWithDeferredConfirmWithContext(context.Background(), exchange, key, mandatory, immediate, msg)
+}
+
+/*
+PublishWithDeferredConfirmWithContext behaves identically to Publish but additionally returns a
+DeferredConfirmation, allowing the caller to wait on the publisher confirmation
+for this message. If the channel has not been put into confirm mode,
+the DeferredConfirmation will be nil.
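+
+A sketch of waiting on a single confirmation (assumes the channel has been put
+in confirm mode with Channel.Confirm; the exchange, key and msg are
+illustrative):
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	dc, err := ch.PublishWithDeferredConfirmWithContext(ctx, "", "jobs", false, false, msg)
+	if err != nil {
+		// handle the publish error
+	}
+	if !dc.Wait() {
+		// the server nacked the publishing
+	}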
+*/
+func (ch *Channel) PublishWithDeferredConfirmWithContext(ctx context.Context, exchange, key string, mandatory, immediate bool, msg Publishing) (*DeferredConfirmation, error) {
+ if ctx == nil {
+ return nil, errors.New("amqp091-go: nil Context")
+ }
+
if err := msg.Headers.Validate(); err != nil {
- return err
+ return nil, err
}
ch.m.Lock()
defer ch.m.Unlock()
+ var dc *DeferredConfirmation
+ if ch.confirming {
+ dc = ch.confirms.publish()
+ }
+
if err := ch.send(&basicPublish{
Exchange: exchange,
RoutingKey: key,
@@ -1353,14 +1589,13 @@ func (ch *Channel) Publish(exchange, key string, mandatory, immediate bool, msg
AppId: msg.AppId,
},
}); err != nil {
- return err
- }
-
- if ch.confirming {
- ch.confirms.Publish()
+ if ch.confirming {
+ ch.confirms.unpublish()
+ }
+ return nil, err
}
- return nil
+ return dc, nil
}
/*
@@ -1379,7 +1614,6 @@ delivery.
When autoAck is true, the server will automatically acknowledge this message so
you don't have to. But if you are unable to fully process this message before
the channel or connection is closed, the message will not get requeued.
-
*/
func (ch *Channel) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) {
req := &basicGet{Queue: queue, NoAck: autoAck}
@@ -1411,7 +1645,6 @@ the channel is in a transaction is not defined.
Once a channel has been put into transaction mode, it cannot be taken out of
transaction mode. Use a different channel for non-transactional semantics.
-
*/
func (ch *Channel) Tx() error {
return ch.call(
@@ -1425,7 +1658,6 @@ TxCommit atomically commits all publishings and acknowledgments for a single
queue and immediately start a new transaction.
Calling this method without having called Channel.Tx is an error.
-
*/
func (ch *Channel) TxCommit() error {
return ch.call(
@@ -1439,7 +1671,6 @@ TxRollback atomically rolls back all publishings and acknowledgments for a
single queue and immediately start a new transaction.
Calling this method without having called Channel.Tx is an error.
-
*/
func (ch *Channel) TxRollback() error {
return ch.call(
@@ -1469,7 +1700,6 @@ pause its publishings when `false` is sent on that channel.
Note: RabbitMQ prefers to use TCP push back to control flow for all channels on
a connection, so under high volume scenarios, it's wise to open separate
Connections for publishings and deliveries.
-
*/
func (ch *Channel) Flow(active bool) error {
return ch.call(
@@ -1501,7 +1731,6 @@ persisting the message if necessary.
When noWait is true, the client will not wait for a response. A channel
exception could occur if the server does not support this method.
-
*/
func (ch *Channel) Confirm(noWait bool) error {
if err := ch.call(
@@ -1530,6 +1759,11 @@ If the deliveries cannot be recovered, an error will be returned and the channel
will be closed.
Note: this method is not implemented on RabbitMQ, use Delivery.Nack instead
+
+Deprecated: This method is deprecated in RabbitMQ. RabbitMQ used Recover(true)
+as a mechanism for consumers to tell the broker that they were ready for more
+deliveries, back in 2008-2009. Support for this will be removed from RabbitMQ in
+a future release. Use Nack() with requeue=true instead.
*/
func (ch *Channel) Recover(requeue bool) error {
return ch.call(
@@ -1591,3 +1825,12 @@ func (ch *Channel) Reject(tag uint64, requeue bool) error {
Requeue: requeue,
})
}
+
+// GetNextPublishSeqNo returns the sequence number of the next message to be
+// published, when in confirm mode.
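+//
+// A sketch of correlating the next publishing with its confirmation via
+// Channel.NotifyPublish (assumes confirm mode is enabled):
+//
+//	seqNo := ch.GetNextPublishSeqNo()
+//	// publish, then match seqNo against Confirmation.DeliveryTag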
+func (ch *Channel) GetNextPublishSeqNo() uint64 {
+ ch.confirms.m.Lock()
+ defer ch.confirms.m.Unlock()
+
+ return ch.confirms.published + 1
+}
diff --git a/vendor/github.com/rabbitmq/amqp091-go/confirms.go b/vendor/github.com/rabbitmq/amqp091-go/confirms.go
new file mode 100644
index 0000000000..577e042bcc
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/confirms.go
@@ -0,0 +1,238 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amqp091
+
+import (
+ "context"
+ "sync"
+)
+
+// confirms resequences and notifies one or multiple publisher confirmation listeners
+type confirms struct {
+ m sync.Mutex
+ listeners []chan Confirmation
+ sequencer map[uint64]Confirmation
+ deferredConfirmations *deferredConfirmations
+ published uint64
+ publishedMut sync.Mutex
+ expecting uint64
+}
+
+// newConfirms allocates a confirms
+func newConfirms() *confirms {
+ return &confirms{
+ sequencer: map[uint64]Confirmation{},
+ deferredConfirmations: newDeferredConfirmations(),
+ published: 0,
+ expecting: 1,
+ }
+}
+
+func (c *confirms) Listen(l chan Confirmation) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.listeners = append(c.listeners, l)
+}
+
+// publish increments the publishing counter and registers a
+// DeferredConfirmation for the new delivery tag
+func (c *confirms) publish() *DeferredConfirmation {
+ c.publishedMut.Lock()
+ defer c.publishedMut.Unlock()
+
+ c.published++
+ return c.deferredConfirmations.Add(c.published)
+}
+
+// unpublish decrements the publishing counter and removes the
+// DeferredConfirmation. It must be called immediately after a publish fails.
+func (c *confirms) unpublish() {
+ c.publishedMut.Lock()
+ defer c.publishedMut.Unlock()
+ c.deferredConfirmations.remove(c.published)
+ c.published--
+}
+
+// confirm confirms one publishing, increments the expecting delivery tag, and
+// removes bookkeeping for that delivery tag.
+func (c *confirms) confirm(confirmation Confirmation) {
+ delete(c.sequencer, c.expecting)
+ c.expecting++
+ for _, l := range c.listeners {
+ l <- confirmation
+ }
+}
+
+// resequence confirms any confirmations that were delivered out of order
+func (c *confirms) resequence() {
+ c.publishedMut.Lock()
+ defer c.publishedMut.Unlock()
+
+ for c.expecting <= c.published {
+ sequenced, found := c.sequencer[c.expecting]
+ if !found {
+ return
+ }
+ c.confirm(sequenced)
+ }
+}
+
+// One confirms one publishing and all following in the publishing sequence
+func (c *confirms) One(confirmed Confirmation) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.deferredConfirmations.Confirm(confirmed)
+
+ if c.expecting == confirmed.DeliveryTag {
+ c.confirm(confirmed)
+ } else {
+ c.sequencer[confirmed.DeliveryTag] = confirmed
+ }
+ c.resequence()
+}
+
+// Multiple confirms all publishings up until the delivery tag
+func (c *confirms) Multiple(confirmed Confirmation) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.deferredConfirmations.ConfirmMultiple(confirmed)
+
+ for c.expecting <= confirmed.DeliveryTag {
+ c.confirm(Confirmation{c.expecting, confirmed.Ack})
+ }
+ c.resequence()
+}
+
+// Close cleans up the confirms struct and its dependencies, closing all
+// listeners and discarding any out-of-sequence confirmations
+func (c *confirms) Close() error {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.deferredConfirmations.Close()
+
+ for _, l := range c.listeners {
+ close(l)
+ }
+ c.listeners = nil
+ return nil
+}
+
+type deferredConfirmations struct {
+ m sync.Mutex
+ confirmations map[uint64]*DeferredConfirmation
+}
+
+func newDeferredConfirmations() *deferredConfirmations {
+ return &deferredConfirmations{
+ confirmations: map[uint64]*DeferredConfirmation{},
+ }
+}
+
+func (d *deferredConfirmations) Add(tag uint64) *DeferredConfirmation {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ dc := &DeferredConfirmation{DeliveryTag: tag}
+ dc.done = make(chan struct{})
+ d.confirmations[tag] = dc
+ return dc
+}
+
+// remove is only used to drop a tag whose publish failed
+func (d *deferredConfirmations) remove(tag uint64) {
+ d.m.Lock()
+ defer d.m.Unlock()
+ dc, found := d.confirmations[tag]
+ if !found {
+ return
+ }
+ close(dc.done)
+ delete(d.confirmations, tag)
+}
+
+func (d *deferredConfirmations) Confirm(confirmation Confirmation) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ dc, found := d.confirmations[confirmation.DeliveryTag]
+ if !found {
+ // We should never receive a confirmation for a tag that hasn't
+ // been published, but a test causes this to happen.
+ return
+ }
+ dc.setAck(confirmation.Ack)
+ delete(d.confirmations, confirmation.DeliveryTag)
+}
+
+func (d *deferredConfirmations) ConfirmMultiple(confirmation Confirmation) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ for k, v := range d.confirmations {
+ if k <= confirmation.DeliveryTag {
+ v.setAck(confirmation.Ack)
+ delete(d.confirmations, k)
+ }
+ }
+}
+
+// Close nacks all pending DeferredConfirmations being blocked by dc.Wait().
+func (d *deferredConfirmations) Close() {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ for k, v := range d.confirmations {
+ v.setAck(false)
+ delete(d.confirmations, k)
+ }
+}
+
+// setAck sets the acknowledgement status of the confirmation. Note that it must
+// not be called more than once.
+func (d *DeferredConfirmation) setAck(ack bool) {
+ d.ack = ack
+ close(d.done)
+}
+
+// Done returns the channel that can be used to wait for the publisher
+// confirmation.
+func (d *DeferredConfirmation) Done() <-chan struct{} {
+ return d.done
+}
+
+// Acked returns the publisher confirmation in a non-blocking manner. It returns
+// false if the confirmation has not arrived yet or if it was a negative
+// acknowledgement.
+func (d *DeferredConfirmation) Acked() bool {
+ select {
+ case <-d.done:
+ default:
+ return false
+ }
+ return d.ack
+}
+
+// Wait blocks until the publisher confirmation. It returns true if the server
+// successfully received the publishing.
+func (d *DeferredConfirmation) Wait() bool {
+ <-d.done
+ return d.ack
+}
+
+// WaitContext waits for the publisher confirmation. It returns true if the
+// server successfully received the publishing. If the context expires before
+// that, ctx.Err() is returned.
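+//
+// A usage sketch (the timeout value is illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	acked, err := dc.WaitContext(ctx)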
+func (d *DeferredConfirmation) WaitContext(ctx context.Context) (bool, error) {
+ select {
+ case <-ctx.Done():
+ return false, ctx.Err()
+ case <-d.done:
+ }
+ return d.ack, nil
+}
diff --git a/vendor/github.com/rabbitmq/amqp091-go/connection.go b/vendor/github.com/rabbitmq/amqp091-go/connection.go
new file mode 100644
index 0000000000..c8bb820d15
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/connection.go
@@ -0,0 +1,1119 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amqp091
+
+import (
+ "bufio"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ maxChannelMax = (2 << 15) - 1
+
+ defaultHeartbeat = 10 * time.Second
+ defaultConnectionTimeout = 30 * time.Second
+ defaultProduct = "AMQP 0.9.1 Client"
+ buildVersion = "1.9.0"
+ platform = "golang"
+ // Safer default that makes channel leaks a lot easier to spot
+ // before they create operational headaches. See https://github.com/rabbitmq/rabbitmq-server/issues/1593.
+ defaultChannelMax = (2 << 10) - 1
+ defaultLocale = "en_US"
+)
+
+// Config is used in DialConfig and Open to specify the desired tuning
+// parameters used during a connection open handshake. The negotiated tuning
+// will be stored in the returned connection's Config field.
+type Config struct {
+ // The SASL mechanisms to try in the client request, and the successful
+ // mechanism used on the Connection object.
+ // If SASL is nil, PlainAuth from the URL is used.
+ SASL []Authentication
+
+ // Vhost specifies the namespace of permissions, exchanges, queues and
+ // bindings on the server. Dial sets this to the path parsed from the URL.
+ Vhost string
+
+ ChannelMax int // 0 means use the server's tuned value (or 2047 if the server also sends 0)
+ FrameSize int // 0 max bytes means unlimited
+ Heartbeat time.Duration // less than 1s uses the server's interval
+
+ // TLSClientConfig specifies the client configuration of the TLS connection
+ // when establishing a tls transport.
+ // If the URL uses an amqps scheme, then an empty tls.Config with the
+ // ServerName from the URL is used.
+ TLSClientConfig *tls.Config
+
+ // Properties is table of properties that the client advertises to the server.
+ // This is an optional setting - if the application does not set this,
+ // the underlying library will use a generic set of client properties.
+ Properties Table
+
+ // Connection locale that we expect to always be en_US
+ // Even though servers must return it as per the AMQP 0-9-1 spec,
+ // we are not aware of it being used other than to satisfy the spec requirements
+ Locale string
+
+ // Dial returns a net.Conn prepared for a TLS handshake with TLSClientConfig,
+ // then an AMQP connection handshake.
+ // If Dial is nil, net.DialTimeout with a 30s connection and 30s deadline is
+ // used during TLS and AMQP handshaking.
+ Dial func(network, addr string) (net.Conn, error)
+}
+
+// NewConnectionProperties creates an amqp.Table to be used as amqp.Config.Properties.
+//
+// Defaults to library-defined values. For empty properties, use make(amqp.Table) instead.
+func NewConnectionProperties() Table {
+ return Table{
+ "product": defaultProduct,
+ "version": buildVersion,
+ "platform": platform,
+ }
+}
+
+// Connection manages the serialization and deserialization of frames from IO
+// and dispatches the frames to the appropriate channel. All RPC methods and
+// asynchronous Publishing, Delivery, Ack, Nack and Return messages are
+// multiplexed on this channel. There must always be active receivers for
+// every asynchronous message on this connection.
+type Connection struct {
+ destructor sync.Once // shutdown once
+ sendM sync.Mutex // conn writer mutex
+ m sync.Mutex // struct field mutex
+
+ conn io.ReadWriteCloser
+
+ rpc chan message
+ writer *writer
+ sends chan time.Time // timestamps of each frame sent
+ deadlines chan readDeadliner // heartbeater updates read deadlines
+
+ allocator *allocator // id generator valid after openTune
+ channels map[uint16]*Channel
+
+ noNotify bool // true when we will never notify again
+ closes []chan *Error
+ blocks []chan Blocking
+
+ errors chan *Error
+ // close is closed when the connection shuts down; it signals waiting goroutines to exit
+ close chan struct{}
+
+ Config Config // The negotiated Config after connection.open
+
+ Major int // Server's major version
+ Minor int // Server's minor version
+ Properties Table // Server properties
+ Locales []string // Server locales
+
+ closed int32 // Will be 1 if the connection is closed, 0 otherwise. Should only be accessed as atomic
+}
+
+type readDeadliner interface {
+ SetReadDeadline(time.Time) error
+}
+
+// DefaultDial establishes a connection when config.Dial is not provided
+func DefaultDial(connectionTimeout time.Duration) func(network, addr string) (net.Conn, error) {
+ return func(network, addr string) (net.Conn, error) {
+ conn, err := net.DialTimeout(network, addr, connectionTimeout)
+ if err != nil {
+ return nil, err
+ }
+
+ // Heartbeating hasn't started yet, don't stall forever on a dead server.
+ // A deadline is set for TLS and AMQP handshaking. After AMQP is established,
+ // the deadline is cleared in openComplete.
+ if err := conn.SetDeadline(time.Now().Add(connectionTimeout)); err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+ }
+}
+
+// Dial accepts a string in the AMQP URI format and returns a new Connection
+// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
+// seconds and sets the handshake deadline to 30 seconds. After handshake,
+// deadlines are cleared.
+//
+// Dial uses the zero value of tls.Config when it encounters an amqps://
+// scheme. It is equivalent to calling DialTLS(url, nil).
+func Dial(url string) (*Connection, error) {
+ return DialConfig(url, Config{
+ Heartbeat: defaultHeartbeat,
+ Locale: defaultLocale,
+ })
+}
+
+// DialTLS accepts a string in the AMQP URI format and returns a new Connection
+// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
+// seconds and sets the initial read deadline to 30 seconds.
+//
+// DialTLS uses the provided tls.Config when encountering an amqps:// scheme.
+func DialTLS(url string, amqps *tls.Config) (*Connection, error) {
+ return DialConfig(url, Config{
+ Heartbeat: defaultHeartbeat,
+ TLSClientConfig: amqps,
+ Locale: defaultLocale,
+ })
+}
+
+// DialTLS_ExternalAuth accepts a string in the AMQP URI format and returns a
+// new Connection over TCP using EXTERNAL auth. Defaults to a server heartbeat
+// interval of 10 seconds and sets the initial read deadline to 30 seconds.
+//
+// This mechanism is used when RabbitMQ is configured for EXTERNAL auth with
+// the ssl_cert_login plugin for userless/passwordless logons.
+//
+// DialTLS_ExternalAuth uses the provided tls.Config when encountering an
+// amqps:// scheme.
+func DialTLS_ExternalAuth(url string, amqps *tls.Config) (*Connection, error) {
+ return DialConfig(url, Config{
+ Heartbeat: defaultHeartbeat,
+ TLSClientConfig: amqps,
+ SASL: []Authentication{&ExternalAuth{}},
+ })
+}
+
+// DialConfig accepts a string in the AMQP URI format and a configuration for
+// the transport and connection setup, returning a new Connection. Defaults to
+// a server heartbeat interval of 10 seconds and sets the initial read deadline
+// to 30 seconds.
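+//
+// A sketch of a customized dial (the URL and values are illustrative):
+//
+//	conn, err := DialConfig("amqp://guest:guest@localhost:5672/", Config{
+//		Heartbeat:  10 * time.Second,
+//		Properties: NewConnectionProperties(),
+//	})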
+func DialConfig(url string, config Config) (*Connection, error) {
+ var err error
+ var conn net.Conn
+
+ uri, err := ParseURI(url)
+ if err != nil {
+ return nil, err
+ }
+
+ if config.SASL == nil {
+ config.SASL = []Authentication{uri.PlainAuth()}
+ }
+
+ if config.Vhost == "" {
+ config.Vhost = uri.Vhost
+ }
+
+ addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10))
+
+ dialer := config.Dial
+ if dialer == nil {
+ dialer = DefaultDial(defaultConnectionTimeout)
+ }
+
+ conn, err = dialer("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+
+ if uri.Scheme == "amqps" {
+ if config.TLSClientConfig == nil {
+ tlsConfig, err := tlsConfigFromURI(uri)
+ if err != nil {
+ return nil, fmt.Errorf("create TLS config from URI: %w", err)
+ }
+ config.TLSClientConfig = tlsConfig
+ }
+
+ // If ServerName has not been specified in TLSClientConfig,
+ // set it to the URI host used for this connection.
+ if config.TLSClientConfig.ServerName == "" {
+ config.TLSClientConfig.ServerName = uri.Host
+ }
+
+ client := tls.Client(conn, config.TLSClientConfig)
+ if err := client.Handshake(); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ conn = client
+ }
+
+ return Open(conn, config)
+}
+
+/*
+Open accepts an already established connection, or other io.ReadWriteCloser as
+a transport. Use this method if you have established a TLS connection or wish
+to use your own custom transport.
+*/
+func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) {
+ c := &Connection{
+ conn: conn,
+ writer: &writer{bufio.NewWriter(conn)},
+ channels: make(map[uint16]*Channel),
+ rpc: make(chan message),
+ sends: make(chan time.Time),
+ errors: make(chan *Error, 1),
+ close: make(chan struct{}),
+ deadlines: make(chan readDeadliner, 1),
+ }
+ go c.reader(conn)
+ return c, c.open(config)
+}
+
+/*
+UpdateSecret updates the secret used to authenticate this connection. It is used when
+secrets have an expiration date and need to be renewed, like OAuth 2 tokens.
+
+It returns an error if the operation is not successful, or if the connection is closed.
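+
+A sketch with a refreshed OAuth 2 token (how the token is acquired is
+illustrative):
+
+	if err := conn.UpdateSecret(newToken, "token refreshed"); err != nil {
+		// handle the error; the connection may be closed
+	}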
+*/
+func (c *Connection) UpdateSecret(newSecret, reason string) error {
+ if c.IsClosed() {
+ return ErrClosed
+ }
+ return c.call(&connectionUpdateSecret{
+ NewSecret: newSecret,
+ Reason: reason,
+ }, &connectionUpdateSecretOk{})
+}
+
+/*
+LocalAddr returns the local TCP address, or ":0" (the zero value of net.TCPAddr)
+as a fallback default value if the underlying transport does not support LocalAddr().
+*/
+func (c *Connection) LocalAddr() net.Addr {
+ if conn, ok := c.conn.(interface {
+ LocalAddr() net.Addr
+ }); ok {
+ return conn.LocalAddr()
+ }
+ return &net.TCPAddr{}
+}
+
+/*
+RemoteAddr returns the remote TCP peer address, if known.
+*/
+func (c *Connection) RemoteAddr() net.Addr {
+ if conn, ok := c.conn.(interface {
+ RemoteAddr() net.Addr
+ }); ok {
+ return conn.RemoteAddr()
+ }
+ return &net.TCPAddr{}
+}
+
+// ConnectionState returns basic TLS details of the underlying transport.
+// Returns a zero value when the underlying connection does not implement
+// ConnectionState() tls.ConnectionState.
+func (c *Connection) ConnectionState() tls.ConnectionState {
+ if conn, ok := c.conn.(interface {
+ ConnectionState() tls.ConnectionState
+ }); ok {
+ return conn.ConnectionState()
+ }
+ return tls.ConnectionState{}
+}
+
+/*
+NotifyClose registers a listener for close events either initiated by an error
+accompanying a connection.close method or by a normal shutdown.
+
+The chan provided will be closed when the Connection is closed and on a
+graceful close, no error will be sent.
+
+In case of a non-graceful close the error will be delivered synchronously by the
+library, so the caller must receive from the chan in order to avoid deadlocks.
+
+To reconnect after a transport or protocol error, register a listener here and
+re-run your setup process.
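+
+A listener sketch using a buffered chan so the library is never blocked (the
+reconnect step is illustrative):
+
+	closes := conn.NotifyClose(make(chan *Error, 1))
+	go func() {
+		for err := range closes {
+			// non-graceful close: re-dial and re-run setup here
+			_ = err
+		}
+		// graceful close: the chan is closed without an error
+	}()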
+*/
+func (c *Connection) NotifyClose(receiver chan *Error) chan *Error {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.noNotify {
+ close(receiver)
+ } else {
+ c.closes = append(c.closes, receiver)
+ }
+
+ return receiver
+}
+
+/*
+NotifyBlocked registers a listener for RabbitMQ specific TCP flow control
+method extensions connection.blocked and connection.unblocked. Flow control is
+active with a reason when Blocking.Active is true. When a Connection is
+blocked, all methods will block across all connections until server resources
+become free again.
+
+This optional extension is supported by the server when the
+"connection.blocked" server capability key is true.
+*/
+func (c *Connection) NotifyBlocked(receiver chan Blocking) chan Blocking {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.noNotify {
+ close(receiver)
+ } else {
+ c.blocks = append(c.blocks, receiver)
+ }
+
+ return receiver
+}
+
+/*
+Close requests and waits for the response to close the AMQP connection.
+
+It's advisable to use this method when publishing to ensure all kernel buffers
+have been flushed on the server and client before exiting.
+
+An error indicates that the server may not have received this request to close but
+the connection should be treated as closed regardless.
+
+After returning from this call, all resources associated with this connection,
+including the underlying io, Channels, Notify listeners and Channel consumers
+will also be closed.
+*/
+func (c *Connection) Close() error {
+ if c.IsClosed() {
+ return ErrClosed
+ }
+
+ defer c.shutdown(nil)
+ return c.call(
+ &connectionClose{
+ ReplyCode: replySuccess,
+ ReplyText: "kthxbai",
+ },
+ &connectionCloseOk{},
+ )
+}
+
+// CloseDeadline requests and waits for the response to close this AMQP connection.
+//
+// Accepts a deadline for waiting the server response. The deadline is passed
+// to the low-level connection i.e. network socket.
+//
+// Regardless of the error returned, the connection is considered closed, and it
+// should not be used after calling this function.
+//
+// In the event of an I/O timeout, connection-closed listeners are NOT informed.
+//
+// After returning from this call, all resources associated with this connection,
+// including the underlying io, Channels, Notify listeners and Channel consumers
+// will also be closed.
+func (c *Connection) CloseDeadline(deadline time.Time) error {
+ if c.IsClosed() {
+ return ErrClosed
+ }
+
+ defer c.shutdown(nil)
+
+ err := c.setDeadline(deadline)
+ if err != nil {
+ return err
+ }
+
+ return c.call(
+ &connectionClose{
+ ReplyCode: replySuccess,
+ ReplyText: "kthxbai",
+ },
+ &connectionCloseOk{},
+ )
+}
+
+func (c *Connection) closeWith(err *Error) error {
+ if c.IsClosed() {
+ return ErrClosed
+ }
+
+ defer c.shutdown(err)
+
+ return c.call(
+ &connectionClose{
+ ReplyCode: uint16(err.Code),
+ ReplyText: err.Reason,
+ },
+ &connectionCloseOk{},
+ )
+}
+
+// IsClosed returns true if the connection is marked as closed, otherwise false
+// is returned.
+func (c *Connection) IsClosed() bool {
+ return atomic.LoadInt32(&c.closed) == 1
+}
+
+// setDeadline is a wrapper to type assert Connection.conn and set an I/O
+// deadline in the underlying TCP connection socket, by calling
+// net.Conn.SetDeadline(). It returns an error if the type assertion fails,
+// although this should never happen.
+func (c *Connection) setDeadline(t time.Time) error {
+ con, ok := c.conn.(net.Conn)
+ if !ok {
+ return errInvalidTypeAssertion
+ }
+ return con.SetDeadline(t)
+}
+
+func (c *Connection) send(f frame) error {
+ if c.IsClosed() {
+ return ErrClosed
+ }
+
+ c.sendM.Lock()
+ err := c.writer.WriteFrame(f)
+ c.sendM.Unlock()
+
+ if err != nil {
+ // shutdown could be re-entrant from signaling notify chans
+ go c.shutdown(&Error{
+ Code: FrameError,
+ Reason: err.Error(),
+ })
+ } else {
+ // Broadcast we sent a frame, reducing heartbeats, only
+ // if there is something that can receive - like a non-reentrant
+ // call or if the heartbeater isn't running
+ select {
+ case c.sends <- time.Now():
+ default:
+ }
+ }
+
+ return err
+}
+
+// This method is intended to be used with sendUnflushed() to end a sequence
+// of sendUnflushed() calls and flush the connection.
+func (c *Connection) endSendUnflushed() error {
+ c.sendM.Lock()
+ defer c.sendM.Unlock()
+ return c.flush()
+}
+
+// sendUnflushed performs an *Unflushed* write. It is otherwise equivalent to
+// send(), and we provide a separate flush() function to explicitly flush the
+// buffer after all Frames are written.
+//
+// Why is this a thing?
+//
+// The send() method uses writer.WriteFrame(), which writes the Frame and then
+// flushes the buffer. For cases like the sendOpen() method on Channel, which
+// sends multiple Frames (methodFrame, headerFrame, N x bodyFrame), flushing
+// after each Frame is inefficient: it negates much of the benefit of using a
+// buffered writer and results in more syscalls than necessary. Flushing after
+// every frame can have a significant performance impact when sending
+// (basicPublish) small messages.
+func (c *Connection) sendUnflushed(f frame) error {
+ if c.IsClosed() {
+ return ErrClosed
+ }
+
+ c.sendM.Lock()
+ err := c.writer.WriteFrameNoFlush(f)
+ c.sendM.Unlock()
+
+ if err != nil {
+ // shutdown could be re-entrant from signaling notify chans
+ go c.shutdown(&Error{
+ Code: FrameError,
+ Reason: err.Error(),
+ })
+ }
+
+ return err
+}
+
+// This method is intended to be used with sendUnflushed() to explicitly flush
+// the buffer after all required Frames have been written to the buffer.
+func (c *Connection) flush() (err error) {
+ if buf, ok := c.writer.w.(*bufio.Writer); ok {
+ err = buf.Flush()
+
+ // Moving the send notifier to flush improves basicPublish performance for the
+ // small-message case. As sendUnflushed + flush is used for sending semantically
+ // related Frames (e.g. a Message like basicPublish), there is no real advantage
+ // to notifying per Frame versus per "group of related Frames", and for small
+ // messages time.Now() is (relatively) expensive.
+ if err == nil {
+ // Broadcast we sent a frame, reducing heartbeats, only
+ // if there is something that can receive - like a non-reentrant
+ // call or if the heartbeater isn't running
+ select {
+ case c.sends <- time.Now():
+ default:
+ }
+ }
+ }
+
+ return
+}
+
+func (c *Connection) shutdown(err *Error) {
+ atomic.StoreInt32(&c.closed, 1)
+
+ c.destructor.Do(func() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if err != nil {
+ for _, c := range c.closes {
+ c <- err
+ }
+ c.errors <- err
+ }
+ // Shutdown handler goroutine can still receive the result.
+ close(c.errors)
+
+ for _, c := range c.closes {
+ close(c)
+ }
+
+ for _, c := range c.blocks {
+ close(c)
+ }
+
+ // Shutdown the channel, but do not use closeChannel() as it calls
+ // releaseChannel() which requires the connection lock.
+ //
+ // Ranging over c.channels and calling releaseChannel() that mutates
+ // c.channels is racy - see commit 6063341 for an example.
+ for _, ch := range c.channels {
+ ch.shutdown(err)
+ }
+
+ c.conn.Close()
+ // reader exit
+ close(c.close)
+
+ c.channels = nil
+ c.allocator = nil
+ c.noNotify = true
+ })
+}
+
+// All methods sent to the connection channel should be synchronous so we
+// can handle them directly without a framing component
+func (c *Connection) demux(f frame) {
+ if f.channel() == 0 {
+ c.dispatch0(f)
+ } else {
+ c.dispatchN(f)
+ }
+}
+
+func (c *Connection) dispatch0(f frame) {
+ switch mf := f.(type) {
+ case *methodFrame:
+ switch m := mf.Method.(type) {
+ case *connectionClose:
+ // Send immediately as shutdown will close our side of the writer.
+ f := &methodFrame{ChannelId: 0, Method: &connectionCloseOk{}}
+ if err := c.send(f); err != nil {
+ Logger.Printf("error sending connectionCloseOk, error: %+v", err)
+ }
+ c.shutdown(newError(m.ReplyCode, m.ReplyText))
+ case *connectionBlocked:
+ for _, c := range c.blocks {
+ c <- Blocking{Active: true, Reason: m.Reason}
+ }
+ case *connectionUnblocked:
+ for _, c := range c.blocks {
+ c <- Blocking{Active: false}
+ }
+ default:
+ select {
+ case <-c.close:
+ return
+ case c.rpc <- m:
+ }
+
+ }
+ case *heartbeatFrame:
+ // kthx - all reads reset our deadline. so we can drop this
+ default:
+ // lolwat - channel0 only responds to methods and heartbeats
+ // closeWith use call don't block reader
+ go func() {
+ if err := c.closeWith(ErrUnexpectedFrame); err != nil {
+ Logger.Printf("error sending connectionCloseOk with ErrUnexpectedFrame, error: %+v", err)
+ }
+ }()
+ }
+}
+
+func (c *Connection) dispatchN(f frame) {
+ c.m.Lock()
+ channel, ok := c.channels[f.channel()]
+ if ok {
+ updateChannel(f, channel)
+ } else {
+ Logger.Printf("[debug] dropping frame, channel %d does not exist", f.channel())
+ }
+ c.m.Unlock()
+
+ // Note: this could result in concurrent dispatch depending on
+ // how channels are managed in an application
+ if ok {
+ channel.recv(channel, f)
+ } else {
+ c.dispatchClosed(f)
+ }
+}
+
+// section 2.3.7: "When a peer decides to close a channel or connection, it
+// sends a Close method. The receiving peer MUST respond to a Close with a
+// Close-Ok, and then both parties can close their channel or connection. Note
+// that if peers ignore Close, deadlock can happen when both peers send Close
+// at the same time."
+//
+// When we don't have a channel, we must respond with close-ok on a close
+// method. This can happen between a channel exception on an asynchronous
+// method like basic.publish and a synchronous close with channel.close.
+// In that case, we'll get both a channel.close and channel.close-ok in any
+// order.
+func (c *Connection) dispatchClosed(f frame) {
+ // Only consider method frames, drop content/header frames
+ if mf, ok := f.(*methodFrame); ok {
+ switch mf.Method.(type) {
+ case *channelClose:
+ f := &methodFrame{ChannelId: f.channel(), Method: &channelCloseOk{}}
+ if err := c.send(f); err != nil {
+ Logger.Printf("error sending channelCloseOk, channel id: %d error: %+v", f.channel(), err)
+ }
+ case *channelCloseOk:
+ // we are already closed, so do nothing
+ default:
+ // unexpected method on closed channel
+ // closeWith use call don't block reader
+ go func() {
+ if err := c.closeWith(ErrClosed); err != nil {
+ Logger.Printf("error sending connectionCloseOk with ErrClosed, error: %+v", err)
+ }
+ }()
+ }
+ }
+}
+
+// Reads each frame off the IO and hands it off to the connection object that
+// will demux the streams and dispatch to one of the opened channels or
+// handle on channel 0 (the connection channel).
+func (c *Connection) reader(r io.Reader) {
+ buf := bufio.NewReader(r)
+ frames := &reader{buf}
+ conn, haveDeadliner := r.(readDeadliner)
+
+ defer close(c.rpc)
+
+ for {
+ frame, err := frames.ReadFrame()
+
+ if err != nil {
+ c.shutdown(&Error{Code: FrameError, Reason: err.Error()})
+ return
+ }
+
+ c.demux(frame)
+
+ if haveDeadliner {
+ select {
+ case c.deadlines <- conn:
+ default:
+ // On c.Close() c.heartbeater() might exit just before c.deadlines <- conn is
+ // called, which would leave this goroutine stuck forever.
+ }
+ }
+ }
+}
+
+// Ensures that at least one frame is being sent at the tuned interval with a
+// jitter tolerance of 1s
+func (c *Connection) heartbeater(interval time.Duration, done chan *Error) {
+ const maxServerHeartbeatsInFlight = 3
+
+ var sendTicks <-chan time.Time
+ if interval > 0 {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ sendTicks = ticker.C
+ }
+
+ lastSent := time.Now()
+
+ for {
+ select {
+ case at, stillSending := <-c.sends:
+ // When actively sending, depend on sent frames to reset server timer
+ if stillSending {
+ lastSent = at
+ } else {
+ return
+ }
+
+ case at := <-sendTicks:
+ // When idle, fill the space with a heartbeat frame
+ if at.Sub(lastSent) > interval-time.Second {
+ if err := c.send(&heartbeatFrame{}); err != nil {
+ // send heartbeats even after close/closeOk so we
+ // tick until the connection starts erroring
+ return
+ }
+ }
+
+ case conn := <-c.deadlines:
+ // When reading, reset our side of the deadline, if we've negotiated one with
+ // a deadline that covers at least 2 server heartbeats
+ if interval > 0 {
+ if err := conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval)); err != nil {
+ var opErr *net.OpError
+ if !errors.As(err, &opErr) {
+ Logger.Printf("error setting read deadline in heartbeater: %+v", err)
+ return
+ }
+ }
+ }
+
+ case <-done:
+ return
+ }
+ }
+}
+
+// Convenience method to inspect the Connection.Properties["capabilities"]
+// Table for server identified capabilities like "basic.ack" or
+// "confirm.select".
+func (c *Connection) isCapable(featureName string) bool {
+ capabilities, _ := c.Properties["capabilities"].(Table)
+ hasFeature, _ := capabilities[featureName].(bool)
+ return hasFeature
+}
+
+// allocateChannel records but does not open a new channel with a unique id.
+// This method is the initial part of the channel lifecycle and paired with
+// releaseChannel
+func (c *Connection) allocateChannel() (*Channel, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.IsClosed() {
+ return nil, ErrClosed
+ }
+
+ id, ok := c.allocator.next()
+ if !ok {
+ return nil, ErrChannelMax
+ }
+
+ ch := newChannel(c, uint16(id))
+ c.channels[uint16(id)] = ch
+
+ return ch, nil
+}
+
+// releaseChannel removes a channel from the registry as the final part of the
+// channel lifecycle
+func (c *Connection) releaseChannel(ch *Channel) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if !c.IsClosed() {
+ got, ok := c.channels[ch.id]
+ if ok && got == ch {
+ delete(c.channels, ch.id)
+ c.allocator.release(int(ch.id))
+ }
+ }
+}
+
+// openChannel allocates and opens a channel, must be paired with closeChannel
+func (c *Connection) openChannel() (*Channel, error) {
+ ch, err := c.allocateChannel()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := ch.open(); err != nil {
+ c.releaseChannel(ch)
+ return nil, err
+ }
+ return ch, nil
+}
+
+// closeChannel releases and initiates a shutdown of the channel. All channel
+// closures should be initiated here for proper channel lifecycle management on
+// this connection.
+func (c *Connection) closeChannel(ch *Channel, e *Error) {
+ ch.shutdown(e)
+ c.releaseChannel(ch)
+}
+
+/*
+Channel opens a unique, concurrent server channel to process the bulk of AMQP
+messages. Any error from methods on this receiver will render the receiver
+invalid and a new Channel should be opened.
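+
+A sketch of the usual open/defer-close pattern:
+
+	ch, err := conn.Channel()
+	if err != nil {
+		// handle the error
+	}
+	defer ch.Close()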
+*/
+func (c *Connection) Channel() (*Channel, error) {
+ return c.openChannel()
+}
+
+func (c *Connection) call(req message, res ...message) error {
+ // Special case for when the protocol header frame is sent instead of a
+ // request method
+ if req != nil {
+ if err := c.send(&methodFrame{ChannelId: 0, Method: req}); err != nil {
+ return err
+ }
+ }
+
+ var msg message
+ select {
+ case e, ok := <-c.errors:
+ if ok {
+ return e
+ }
+ return ErrClosed
+ case msg = <-c.rpc:
+ }
+
+ // Try to match one of the result types
+ for _, try := range res {
+ if reflect.TypeOf(msg) == reflect.TypeOf(try) {
+ // *res = *msg
+ vres := reflect.ValueOf(try).Elem()
+ vmsg := reflect.ValueOf(msg).Elem()
+ vres.Set(vmsg)
+ return nil
+ }
+ }
+ return ErrCommandInvalid
+}
+
+// Communication flow to open, use and close a connection. 'C:' are
+// frames sent by the Client. 'S:' are frames sent by the Server.
+//
+// Connection = open-Connection *use-Connection close-Connection
+//
+// open-Connection = C:protocol-header
+// S:START C:START-OK
+// *challenge
+// S:TUNE C:TUNE-OK
+// C:OPEN S:OPEN-OK
+//
+// challenge = S:SECURE C:SECURE-OK
+//
+// use-Connection = *channel
+//
+// close-Connection = C:CLOSE S:CLOSE-OK
+// S:CLOSE C:CLOSE-OK
+func (c *Connection) open(config Config) error {
+ if err := c.send(&protocolHeader{}); err != nil {
+ return err
+ }
+
+ return c.openStart(config)
+}
+
+func (c *Connection) openStart(config Config) error {
+ start := &connectionStart{}
+
+ if err := c.call(nil, start); err != nil {
+ return err
+ }
+
+ c.Major = int(start.VersionMajor)
+ c.Minor = int(start.VersionMinor)
+ c.Properties = start.ServerProperties
+ c.Locales = strings.Split(start.Locales, " ")
+
+ // eventually support challenge/response here by also responding to
+ // connectionSecure.
+ auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " "))
+ if !ok {
+ return ErrSASL
+ }
+
+ // Save this mechanism off as the one we chose
+ c.Config.SASL = []Authentication{auth}
+
+ // Set the connection locale to client locale
+ c.Config.Locale = config.Locale
+
+ return c.openTune(config, auth)
+}
+
+func (c *Connection) openTune(config Config, auth Authentication) error {
+ if len(config.Properties) == 0 {
+ config.Properties = NewConnectionProperties()
+ }
+
+ config.Properties["capabilities"] = Table{
+ "connection.blocked": true,
+ "consumer_cancel_notify": true,
+ "basic.nack": true,
+ "publisher_confirms": true,
+ }
+
+ ok := &connectionStartOk{
+ ClientProperties: config.Properties,
+ Mechanism: auth.Mechanism(),
+ Response: auth.Response(),
+ Locale: config.Locale,
+ }
+ tune := &connectionTune{}
+
+ if err := c.call(ok, tune); err != nil {
+ // per spec, a connection can only be closed when it has been opened
+ // so at this point, we know it's an auth error, but the socket
+ // was closed instead. Return a meaningful error.
+ return ErrCredentials
+ }
+
+ // Edge case that may race with c.shutdown()
+ // https://github.com/rabbitmq/amqp091-go/issues/170
+ c.m.Lock()
+
+ // When the server and client both use default 0, then the max channel is
+ // only limited by uint16.
+ c.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax))
+ if c.Config.ChannelMax == 0 {
+ c.Config.ChannelMax = defaultChannelMax
+ }
+ c.Config.ChannelMax = min(c.Config.ChannelMax, maxChannelMax)
+
+ c.allocator = newAllocator(1, c.Config.ChannelMax)
+
+ c.m.Unlock()
+
+ // Frame size includes headers and end byte (len(payload)+8). Even if this
+ // is less than FrameMinSize, use what the server sends, because the
+ // alternative is to stop the handshake here.
+ c.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax))
+
+ // Save this off for resetDeadline()
+ c.Config.Heartbeat = time.Second * time.Duration(pick(
+ int(config.Heartbeat/time.Second),
+ int(tune.Heartbeat)))
+
+ // "The client should start sending heartbeats after receiving a
+ // Connection.Tune method"
+ go c.heartbeater(c.Config.Heartbeat/2, c.NotifyClose(make(chan *Error, 1)))
+
+ if err := c.send(&methodFrame{
+ ChannelId: 0,
+ Method: &connectionTuneOk{
+ ChannelMax: uint16(c.Config.ChannelMax),
+ FrameMax: uint32(c.Config.FrameSize),
+ Heartbeat: uint16(c.Config.Heartbeat / time.Second),
+ },
+ }); err != nil {
+ return err
+ }
+
+ return c.openVhost(config)
+}
+
+func (c *Connection) openVhost(config Config) error {
+ req := &connectionOpen{VirtualHost: config.Vhost}
+ res := &connectionOpenOk{}
+
+ if err := c.call(req, res); err != nil {
+ // Cannot be closed yet, but we know it's a vhost problem
+ return ErrVhost
+ }
+
+ c.Config.Vhost = config.Vhost
+
+ return c.openComplete()
+}
+
+// openComplete performs any final Connection initialization dependent on the
+// connection handshake and clears any state needed for TLS and AMQP handshaking.
+func (c *Connection) openComplete() error {
+ // We clear the deadlines and let the heartbeater reset the read deadline if requested.
+ // RabbitMQ uses TCP flow control at this point for pushback so Writes can
+ // intentionally block.
+ if deadliner, ok := c.conn.(interface {
+ SetDeadline(time.Time) error
+ }); ok {
+ _ = deadliner.SetDeadline(time.Time{})
+ }
+
+ return nil
+}
+
+// tlsConfigFromURI tries to create a TLS configuration based on query parameters.
+// It returns a server-auth-only config when no suitable client cert and/or
+// client key is provided, and an error when certificates cannot be parsed.
+func tlsConfigFromURI(uri URI) (*tls.Config, error) {
+ var certPool *x509.CertPool
+ if uri.CACertFile != "" {
+ data, err := os.ReadFile(uri.CACertFile)
+ if err != nil {
+ return nil, fmt.Errorf("read CA certificate: %w", err)
+ }
+
+ certPool = x509.NewCertPool()
+ certPool.AppendCertsFromPEM(data)
+ } else if sysPool, err := x509.SystemCertPool(); err != nil {
+ return nil, fmt.Errorf("load system certificates: %w", err)
+ } else {
+ certPool = sysPool
+ }
+
+ if uri.CertFile == "" || uri.KeyFile == "" {
+ // no client auth (mTLS), just server auth
+ return &tls.Config{
+ RootCAs: certPool,
+ ServerName: uri.ServerName,
+ }, nil
+ }
+
+ certificate, err := tls.LoadX509KeyPair(uri.CertFile, uri.KeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("load client certificate: %w", err)
+ }
+
+ return &tls.Config{
+ Certificates: []tls.Certificate{certificate},
+ RootCAs: certPool,
+ ServerName: uri.ServerName,
+ }, nil
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func pick(client, server int) int {
+ if client == 0 || server == 0 {
+ return max(client, server)
+ }
+ return min(client, server)
+}
diff --git a/vendor/github.com/rabbitmq/amqp091-go/consumers.go b/vendor/github.com/rabbitmq/amqp091-go/consumers.go
new file mode 100644
index 0000000000..c352fece97
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/consumers.go
@@ -0,0 +1,169 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amqp091
+
+import (
+ "os"
+ "strconv"
+ "sync"
+ "sync/atomic"
+)
+
+var consumerSeq uint64
+
+const consumerTagLengthMax = 0xFF // see writeShortstr
+
+func uniqueConsumerTag() string {
+ return commandNameBasedUniqueConsumerTag(os.Args[0])
+}
+
+func commandNameBasedUniqueConsumerTag(commandName string) string {
+ tagPrefix := "ctag-"
+ tagInfix := commandName
+ tagSuffix := "-" + strconv.FormatUint(atomic.AddUint64(&consumerSeq, 1), 10)
+
+ if len(tagPrefix)+len(tagInfix)+len(tagSuffix) > consumerTagLengthMax {
+ tagInfix = "streadway/amqp"
+ }
+
+ return tagPrefix + tagInfix + tagSuffix
+}
+
+type consumerBuffers map[string]chan *Delivery
+
+// Concurrent type that manages the consumerTag ->
+// ingress consumerBuffer mapping
+type consumers struct {
+ sync.WaitGroup // one for buffer
+ closed chan struct{} // signal buffer
+
+ sync.Mutex // protects below
+ chans consumerBuffers
+}
+
+func makeConsumers() *consumers {
+ return &consumers{
+ closed: make(chan struct{}),
+ chans: make(consumerBuffers),
+ }
+}
+
+func (subs *consumers) buffer(in chan *Delivery, out chan Delivery) {
+ defer close(out)
+ defer subs.Done()
+
+ var inflight = in
+ var queue []*Delivery
+
+ for delivery := range in {
+ queue = append(queue, delivery)
+
+ for len(queue) > 0 {
+ select {
+ case <-subs.closed:
+ // closed before drained, drop in-flight
+ return
+
+ case delivery, consuming := <-inflight:
+ if consuming {
+ queue = append(queue, delivery)
+ } else {
+ inflight = nil
+ }
+
+ case out <- *queue[0]:
+ /*
+ * https://github.com/rabbitmq/amqp091-go/issues/179
+ * https://github.com/rabbitmq/amqp091-go/pull/180
+ *
+ * Comment from @lars-t-hansen:
+ *
+ * Given Go's slice semantics, and barring any information
+ * available to the compiler that proves that queue is the only
+ * pointer to the memory it references, the only meaning that
+ * queue = queue[1:] can have is basically queue += sizeof(queue
+ * element), ie, it bumps a pointer. Looking at the generated
+ * code for a simple example (on ARM64 in this case) bears this
+ * out. So what we're left with is an array that we have a
+ * pointer into the middle of. When the GC traces this pointer,
+ * it too does not know whether the array has multiple
+ * referents, and so its only sensible choice is to find the
+ * beginning of the array, and if the array is not already
+ * visited, mark every element in it, including the "dead"
+ * pointer.
+ *
+ * (Depending on the program dynamics, an element may eventually
+ * be appended to the queue when the queue is at capacity, and
+ * in this case the live elements are copied into a new array
+ * and the old array is left to be GC'd eventually, along with
+ * the dead object. But that can take time.)
+ */
+ queue[0] = nil
+ queue = queue[1:]
+ }
+ }
+ }
+}
+
+// On key conflict, close the previous channel.
+func (subs *consumers) add(tag string, consumer chan Delivery) {
+ subs.Lock()
+ defer subs.Unlock()
+
+ if prev, found := subs.chans[tag]; found {
+ close(prev)
+ }
+
+ in := make(chan *Delivery)
+ subs.chans[tag] = in
+
+ subs.Add(1)
+ go subs.buffer(in, consumer)
+}
+
+func (subs *consumers) cancel(tag string) (found bool) {
+ subs.Lock()
+ defer subs.Unlock()
+
+ ch, found := subs.chans[tag]
+
+ if found {
+ delete(subs.chans, tag)
+ close(ch)
+ }
+
+ return found
+}
+
+func (subs *consumers) close() {
+ subs.Lock()
+ defer subs.Unlock()
+
+ close(subs.closed)
+
+ for tag, ch := range subs.chans {
+ delete(subs.chans, tag)
+ close(ch)
+ }
+
+ subs.Wait()
+}
+
+// Sends a delivery to the consumer identified by `tag`.
+// If unbuffered channels are used for Consume this method
+// could block all deliveries until the consumer
+// receives on the other end of the channel.
+func (subs *consumers) send(tag string, msg *Delivery) bool {
+ subs.Lock()
+ defer subs.Unlock()
+
+ buffer, found := subs.chans[tag]
+ if found {
+ buffer <- msg
+ }
+
+ return found
+}
diff --git a/vendor/github.com/streadway/amqp/delivery.go b/vendor/github.com/rabbitmq/amqp091-go/delivery.go
similarity index 96%
rename from vendor/github.com/streadway/amqp/delivery.go
rename to vendor/github.com/rabbitmq/amqp091-go/delivery.go
index 7241264423..e94cf34370 100644
--- a/vendor/github.com/streadway/amqp/delivery.go
+++ b/vendor/github.com/rabbitmq/amqp091-go/delivery.go
@@ -1,9 +1,9 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-package amqp
+package amqp091
import (
"errors"
@@ -13,7 +13,7 @@ import (
var errDeliveryNotInitialized = errors.New("delivery not initialized")
// Acknowledger notifies the server of successful or failed consumption of
-// delivieries via identifier found in the Delivery.DeliveryTag field.
+// deliveries via identifier found in the Delivery.DeliveryTag field.
//
// Applications can provide mock implementations in tests of Delivery handlers.
type Acknowledger interface {
diff --git a/vendor/github.com/rabbitmq/amqp091-go/doc.go b/vendor/github.com/rabbitmq/amqp091-go/doc.go
new file mode 100644
index 0000000000..8cb0b64f04
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/doc.go
@@ -0,0 +1,165 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package amqp091 is an AMQP 0.9.1 client with RabbitMQ extensions
+
+Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much
+of the terminology in this library directly relates to AMQP concepts.
+
+# Resources
+
+ http://www.rabbitmq.com/tutorials/amqp-concepts.html
+ http://www.rabbitmq.com/getstarted.html
+ http://www.rabbitmq.com/amqp-0-9-1-reference.html
+
+# Design
+
+Most other broker clients publish to queues, but in AMQP, clients publish to
+Exchanges instead. AMQP is programmable, meaning that both the producers and
+consumers agree on the configuration of the broker, instead of requiring an
+operator or system configuration that declares the logical topology in the
+broker. The routing between producers and consumer queues is via Bindings.
+These bindings form the logical topology of the broker.
+
+In this library, a message sent from a publisher is called a "Publishing" and
+a message received by a consumer is called a "Delivery". The fields of
+Publishings and Deliveries are close but not exact mappings to the underlying
+wire format to maintain stronger types. Many other libraries will combine
+message properties with message headers. In this library, the message well
+known properties are strongly typed fields on the Publishings and Deliveries,
+whereas the user defined headers are in the Headers field.
+
+The method naming closely matches the protocol's method name with positional
+parameters mapping to named protocol message fields. The motivation here is to
+present a comprehensive view over all possible interactions with the server.
+
+Generally, methods that map to protocol methods of the "basic" class will be
+elided in this interface, and "select" methods of various channel mode selectors
+will be elided, for example Channel.Confirm and Channel.Tx.
+
+The library is intentionally designed to be synchronous, where responses for
+each protocol message are required to be received in an RPC manner. Some
+methods have a noWait parameter like Channel.QueueDeclare, and some methods are
+asynchronous like Channel.Publish. The error values should still be checked for
+these methods as they will indicate IO failures like when the underlying
+connection closes.
+
+# Asynchronous Events
+
+Clients of this library may be interested in receiving some of the protocol
+messages other than Deliveries, like basic.ack methods while a channel is in
+confirm mode.
+
+The Notify* methods with Connection and Channel receivers model the pattern of
+asynchronous events like closes due to exceptions, or messages that are sent out
+of band from an RPC call like basic.ack or basic.flow.
+
+Any asynchronous events, including Deliveries and Publishings, must always have
+a receiver until the corresponding chans are closed. Without asynchronous
+receivers, the synchronous methods will block.
+
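+For example, returned messages can be received with NotifyReturn. This is a
+sketch; the registered channel capacity and the printed output are
+illustrative:
+
+	returns := channel.NotifyReturn(make(chan amqp.Return, 1))
+	go func() {
+		for r := range returns {
+			fmt.Printf("returned message: %s\n", r.ReplyText)
+		}
+	}()
+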
+# Use Case
+
+As a client of an AMQP topology, it's important to ensure the state of the
+broker matches your expectations. For both publish and consume use cases,
+make sure you declare the queues, exchanges and bindings you expect to exist
+prior to calling [Channel.PublishWithContext] or [Channel.Consume].
+
+ // Connections start with amqp.Dial() typically from a command line argument
+ // or environment variable.
+ connection, err := amqp.Dial(os.Getenv("AMQP_URL"))
+
+	// To cleanly shut down by flushing kernel buffers, make sure to close and
+ // wait for the response.
+ defer connection.Close()
+
+	// Most operations happen on a channel. If any error is returned on a
+	// channel, the channel will no longer be valid; throw it away and try
+	// again with a different channel.
+ channel, err := connection.Channel()
+
+ // Declare your topology here, if it doesn't exist, it will be created, if
+ // it existed already and is not what you expect, then that's considered an
+ // error.
+
+	// Use your connection on this topology with either Publish or Consume, or
+	// inspect your queues with QueueInspect. It's unwise to mix Publish and
+	// Consume on the same connection; keeping them separate lets TCP do its
+	// job well.
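+
+	// As a sketch (queue name and flags are illustrative), a typical
+	// declare-and-consume flow:
+	q, err := channel.QueueDeclare("work", true, false, false, false, nil)
+	deliveries, err := channel.Consume(q.Name, "", false, false, false, false, nil)
+	for d := range deliveries {
+		// process d.Body, then acknowledge receipt
+		d.Ack(false)
+	}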
+
+# SSL/TLS - Secure connections
+
+When Dial encounters an amqps:// scheme, it will use the zero value of a
+tls.Config. This will only perform server certificate and host verification.
+
+Use DialTLS when you wish to provide a client certificate (recommended),
+include a private certificate authority's certificate in the cert chain for
+server validity, or run insecure by not verifying the server certificate.
+DialTLS will use the provided tls.Config when it encounters an amqps://
+scheme and will dial a plain connection when it encounters an amqp://
+scheme.
+
+SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html
+
+# Best practices for Connection and Channel notifications
+
+In order to be notified when a connection or channel gets closed, both
+structures offer the possibility to register channels using
+[Channel.NotifyClose] and [Connection.NotifyClose] functions:
+
+ notifyConnCloseCh := conn.NotifyClose(make(chan *amqp.Error))
+
+No errors will be sent in case of a graceful connection close. In case of a
+non-graceful closure due to, e.g., a network issue or a forced connection
+closure from the Management UI, the error will be notified synchronously by
+the library.
+
+The error is sent synchronously to the channel, so that the flow will wait until
+the receiver consumes from the channel. To avoid deadlocks in the library, it is
+necessary to consume from the channels. This could be done inside a
+different goroutine with a select listening on the two channels inside a for
+loop like:
+
+ go func() {
+ for notifyConnClose != nil || notifyChanClose != nil {
+ select {
+ case err, ok := <-notifyConnClose:
+ if !ok {
+ notifyConnClose = nil
+ } else {
+ fmt.Printf("connection closed, error %s", err)
+ }
+ case err, ok := <-notifyChanClose:
+ if !ok {
+ notifyChanClose = nil
+ } else {
+ fmt.Printf("channel closed, error %s", err)
+ }
+ }
+ }
+ }()
+
+Another approach is to use buffered channels:
+
+ notifyConnCloseCh := conn.NotifyClose(make(chan *amqp.Error, 1))
+
+The library sends to notification channels just once. After sending a notification
+to all channels, the library closes all registered notification channels. After
+receiving a notification, the application should create and register a new channel.
+
+# Best practices for NotifyPublish notifications
+
+Using [Channel.NotifyPublish] allows the caller of the library to be notified,
+through a Go channel, when a message has been received and confirmed by the
+broker. It's advisable to wait for all Confirmations to arrive before calling
+[Channel.Close] or [Connection.Close]. It is also necessary to consume from this
+channel until it gets closed. The library sends synchronously to the registered channel.
+It is advisable to use a buffered channel, with capacity set to the maximum acceptable
+number of unconfirmed messages.
+
+It is important to consume from the confirmation channel at all times, in order to avoid
+deadlocks in the library.
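+
+As a sketch, putting the channel in confirm mode and draining confirmations
+from a buffered channel (the capacity of 100 is illustrative):
+
+	if err := channel.Confirm(false); err != nil {
+		// the server does not support publisher confirms
+	}
+	confirms := channel.NotifyPublish(make(chan amqp.Confirmation, 100))
+	go func() {
+		for c := range confirms {
+			fmt.Printf("delivery tag %d acked: %t\n", c.DeliveryTag, c.Ack)
+		}
+	}()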
+*/
+package amqp091
diff --git a/vendor/github.com/rabbitmq/amqp091-go/fuzz.go b/vendor/github.com/rabbitmq/amqp091-go/fuzz.go
new file mode 100644
index 0000000000..c9f03ea4e6
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/fuzz.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gofuzz
+// +build gofuzz
+
+package amqp091
+
+import "bytes"
+
+func Fuzz(data []byte) int {
+ r := reader{bytes.NewReader(data)}
+ frame, err := r.ReadFrame()
+ if err != nil {
+ if frame != nil {
+ panic("frame is not nil")
+ }
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/github.com/rabbitmq/amqp091-go/gen.ps1 b/vendor/github.com/rabbitmq/amqp091-go/gen.ps1
new file mode 100644
index 0000000000..c93354316f
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/gen.ps1
@@ -0,0 +1,14 @@
+$DebugPreference = 'Continue'
+$ErrorActionPreference = 'Stop'
+
+Set-PSDebug -Off
+Set-StrictMode -Version 'Latest' -ErrorAction 'Stop' -Verbose
+
+New-Variable -Name curdir -Option Constant -Value $PSScriptRoot
+
+$specDir = Resolve-Path -LiteralPath (Join-Path -Path $curdir -ChildPath 'spec')
+$amqpSpecXml = Resolve-Path -LiteralPath (Join-Path -Path $specDir -ChildPath 'amqp0-9-1.stripped.extended.xml')
+$gen = Resolve-Path -LiteralPath (Join-Path -Path $specDir -ChildPath 'gen.go')
+$spec091 = Resolve-Path -LiteralPath (Join-Path -Path $curdir -ChildPath 'spec091.go')
+
+Get-Content -LiteralPath $amqpSpecXml | go run $gen | gofmt | Set-Content -Force -Path $spec091
diff --git a/vendor/github.com/streadway/amqp/gen.sh b/vendor/github.com/rabbitmq/amqp091-go/gen.sh
similarity index 100%
rename from vendor/github.com/streadway/amqp/gen.sh
rename to vendor/github.com/rabbitmq/amqp091-go/gen.sh
diff --git a/vendor/github.com/rabbitmq/amqp091-go/log.go b/vendor/github.com/rabbitmq/amqp091-go/log.go
new file mode 100644
index 0000000000..7540f137af
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/log.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2022 VMware, Inc. or its affiliates. All Rights Reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amqp091
+
+type Logging interface {
+ Printf(format string, v ...interface{})
+}
+
+var Logger Logging = NullLogger{}
+
+// SetLogger enables logging using a custom Logging instance. Note that this
+// is not thread safe and should be called at application start.
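+//
+// A sketch adapting the standard library logger (stdLogger is illustrative):
+//
+//	type stdLogger struct{}
+//
+//	func (stdLogger) Printf(format string, v ...interface{}) {
+//		log.Printf(format, v...)
+//	}
+//
+//	amqp091.SetLogger(stdLogger{})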
+func SetLogger(logger Logging) {
+ Logger = logger
+}
+
+type NullLogger struct {
+}
+
+func (l NullLogger) Printf(format string, v ...interface{}) {
+}
diff --git a/vendor/github.com/streadway/amqp/read.go b/vendor/github.com/rabbitmq/amqp091-go/read.go
similarity index 83%
rename from vendor/github.com/streadway/amqp/read.go
rename to vendor/github.com/rabbitmq/amqp091-go/read.go
index 3aa0b33811..a8bed13795 100644
--- a/vendor/github.com/streadway/amqp/read.go
+++ b/vendor/github.com/rabbitmq/amqp091-go/read.go
@@ -1,9 +1,9 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-package amqp
+package amqp091
import (
"bytes"
@@ -14,29 +14,29 @@ import (
)
/*
-Reads a frame from an input stream and returns an interface that can be cast into
+ReadFrame reads a frame from an input stream and returns an interface that can be cast into
one of the following:
- methodFrame
- PropertiesFrame
- bodyFrame
- heartbeatFrame
+ methodFrame
+ PropertiesFrame
+ bodyFrame
+ heartbeatFrame
2.3.5 frame Details
All frames consist of a header (7 octets), a payload of arbitrary size, and a
'frame-end' octet that detects malformed frames:
- 0 1 3 7 size+7 size+8
- +------+---------+-------------+ +------------+ +-----------+
- | type | channel | size | | payload | | frame-end |
- +------+---------+-------------+ +------------+ +-----------+
- octet short long size octets octet
+ 0 1 3 7 size+7 size+8
+ +------+---------+-------------+ +------------+ +-----------+
+ | type | channel | size | | payload | | frame-end |
+ +------+---------+-------------+ +------------+ +-----------+
+ octet short long size octets octet
To read a frame, we:
- 1. Read the header and check the frame type and channel.
- 2. Depending on the frame type, we read the payload and process it.
- 3. Read the frame end octet.
+ 1. Read the header and check the frame type and channel.
+ 2. Depending on the frame type, we read the payload and process it.
+ 3. Read the frame end octet.
In realistic implementations where performance is a concern, we would use
“read-ahead buffering” or
@@ -50,7 +50,7 @@ func (r *reader) ReadFrame() (frame frame, err error) {
return
}
- typ := uint8(scratch[0])
+ typ := scratch[0]
channel := binary.BigEndian.Uint16(scratch[1:3])
size := binary.BigEndian.Uint32(scratch[3:7])
@@ -131,20 +131,6 @@ func readDecimal(r io.Reader) (v Decimal, err error) {
return
}
-func readFloat32(r io.Reader) (v float32, err error) {
- if err = binary.Read(r, binary.BigEndian, &v); err != nil {
- return
- }
- return
-}
-
-func readFloat64(r io.Reader) (v float64, err error) {
- if err = binary.Read(r, binary.BigEndian, &v); err != nil {
- return
- }
- return
-}
-
func readTimestamp(r io.Reader) (v time.Time, err error) {
var sec int64
if err = binary.Read(r, binary.BigEndian, &sec); err != nil {
@@ -161,7 +147,8 @@ func readTimestamp(r io.Reader) (v time.Time, err error) {
'S': string
'T': time.Time
'V': nil
-'b': byte
+'b': int8
+'B': byte
'd': float64
'f': float32
'l': int64
@@ -181,15 +168,22 @@ func readField(r io.Reader) (v interface{}, err error) {
if err = binary.Read(r, binary.BigEndian, &value); err != nil {
return
}
- return (value != 0), nil
+ return value != 0, nil
- case 'b':
+ case 'B':
var value [1]byte
if _, err = io.ReadFull(r, value[0:1]); err != nil {
return
}
return value[0], nil
+ case 'b':
+ var value int8
+ if err = binary.Read(r, binary.BigEndian, &value); err != nil {
+ return
+ }
+ return value, nil
+
case 's':
var value int16
if err = binary.Read(r, binary.BigEndian, &value); err != nil {
@@ -260,12 +254,12 @@ func readField(r io.Reader) (v interface{}, err error) {
}
/*
- Field tables are long strings that contain packed name-value pairs. The
- name-value pairs are encoded as short string defining the name, and octet
- defining the values type and then the value itself. The valid field types for
- tables are an extension of the native integer, bit, string, and timestamp
- types, and are shown in the grammar. Multi-octet integer fields are always
- held in network byte order.
+Field tables are long strings that contain packed name-value pairs. The
+name-value pairs are encoded as short string defining the name, and octet
+defining the values type and then the value itself. The valid field types for
+tables are an extension of the native integer, bit, string, and timestamp
+types, and are shown in the grammar. Multi-octet integer fields are always
+held in network byte order.
*/
func readTable(r io.Reader) (table Table, err error) {
var nested bytes.Buffer
@@ -309,7 +303,7 @@ func readArray(r io.Reader) ([]interface{}, error) {
var (
lim = &io.LimitedReader{R: r, N: int64(size)}
- arr = []interface{}{}
+ arr []interface{}
field interface{}
)
diff --git a/vendor/github.com/streadway/amqp/return.go b/vendor/github.com/rabbitmq/amqp091-go/return.go
similarity index 93%
rename from vendor/github.com/streadway/amqp/return.go
rename to vendor/github.com/rabbitmq/amqp091-go/return.go
index 10dcedb2c8..cdc3875edc 100644
--- a/vendor/github.com/streadway/amqp/return.go
+++ b/vendor/github.com/rabbitmq/amqp091-go/return.go
@@ -1,9 +1,9 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-package amqp
+package amqp091
import (
"time"
diff --git a/vendor/github.com/streadway/amqp/spec091.go b/vendor/github.com/rabbitmq/amqp091-go/spec091.go
similarity index 92%
rename from vendor/github.com/streadway/amqp/spec091.go
rename to vendor/github.com/rabbitmq/amqp091-go/spec091.go
index cd53ebe740..6e02ba9971 100644
--- a/vendor/github.com/streadway/amqp/spec091.go
+++ b/vendor/github.com/rabbitmq/amqp091-go/spec091.go
@@ -1,12 +1,12 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
/* GENERATED FILE - DO NOT EDIT */
/* Rebuild from the spec/gen.go tool */
-package amqp
+package amqp091
import (
"encoding/binary"
@@ -552,6 +552,66 @@ func (msg *connectionUnblocked) read(r io.Reader) (err error) {
return
}
+type connectionUpdateSecret struct {
+ NewSecret string
+ Reason string
+}
+
+func (msg *connectionUpdateSecret) id() (uint16, uint16) {
+ return 10, 70
+}
+
+func (msg *connectionUpdateSecret) wait() bool {
+ return true
+}
+
+func (msg *connectionUpdateSecret) write(w io.Writer) (err error) {
+
+ if err = writeLongstr(w, msg.NewSecret); err != nil {
+ return
+ }
+
+ if err = writeShortstr(w, msg.Reason); err != nil {
+ return
+ }
+
+ return
+}
+
+func (msg *connectionUpdateSecret) read(r io.Reader) (err error) {
+
+ if msg.NewSecret, err = readLongstr(r); err != nil {
+ return
+ }
+
+ if msg.Reason, err = readShortstr(r); err != nil {
+ return
+ }
+
+ return
+}
+
+type connectionUpdateSecretOk struct {
+}
+
+func (msg *connectionUpdateSecretOk) id() (uint16, uint16) {
+ return 10, 71
+}
+
+func (msg *connectionUpdateSecretOk) wait() bool {
+ return true
+}
+
+func (msg *connectionUpdateSecretOk) write(w io.Writer) (err error) {
+
+ return
+}
+
+func (msg *connectionUpdateSecretOk) read(r io.Reader) (err error) {
+
+ return
+}
+
type channelOpen struct {
reserved1 string
}
@@ -2757,7 +2817,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
switch mf.MethodId {
case 10: // connection start
- //fmt.Println("NextMethod: class:10 method:10")
+ // fmt.Println("NextMethod: class:10 method:10")
method := &connectionStart{}
if err = method.read(r.r); err != nil {
return
@@ -2765,7 +2825,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 11: // connection start-ok
- //fmt.Println("NextMethod: class:10 method:11")
+ // fmt.Println("NextMethod: class:10 method:11")
method := &connectionStartOk{}
if err = method.read(r.r); err != nil {
return
@@ -2773,7 +2833,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 20: // connection secure
- //fmt.Println("NextMethod: class:10 method:20")
+ // fmt.Println("NextMethod: class:10 method:20")
method := &connectionSecure{}
if err = method.read(r.r); err != nil {
return
@@ -2781,7 +2841,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 21: // connection secure-ok
- //fmt.Println("NextMethod: class:10 method:21")
+ // fmt.Println("NextMethod: class:10 method:21")
method := &connectionSecureOk{}
if err = method.read(r.r); err != nil {
return
@@ -2789,7 +2849,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 30: // connection tune
- //fmt.Println("NextMethod: class:10 method:30")
+ // fmt.Println("NextMethod: class:10 method:30")
method := &connectionTune{}
if err = method.read(r.r); err != nil {
return
@@ -2797,7 +2857,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 31: // connection tune-ok
- //fmt.Println("NextMethod: class:10 method:31")
+ // fmt.Println("NextMethod: class:10 method:31")
method := &connectionTuneOk{}
if err = method.read(r.r); err != nil {
return
@@ -2805,7 +2865,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 40: // connection open
- //fmt.Println("NextMethod: class:10 method:40")
+ // fmt.Println("NextMethod: class:10 method:40")
method := &connectionOpen{}
if err = method.read(r.r); err != nil {
return
@@ -2813,7 +2873,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 41: // connection open-ok
- //fmt.Println("NextMethod: class:10 method:41")
+ // fmt.Println("NextMethod: class:10 method:41")
method := &connectionOpenOk{}
if err = method.read(r.r); err != nil {
return
@@ -2821,7 +2881,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 50: // connection close
- //fmt.Println("NextMethod: class:10 method:50")
+ // fmt.Println("NextMethod: class:10 method:50")
method := &connectionClose{}
if err = method.read(r.r); err != nil {
return
@@ -2829,7 +2889,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 51: // connection close-ok
- //fmt.Println("NextMethod: class:10 method:51")
+ // fmt.Println("NextMethod: class:10 method:51")
method := &connectionCloseOk{}
if err = method.read(r.r); err != nil {
return
@@ -2837,7 +2897,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 60: // connection blocked
- //fmt.Println("NextMethod: class:10 method:60")
+ // fmt.Println("NextMethod: class:10 method:60")
method := &connectionBlocked{}
if err = method.read(r.r); err != nil {
return
@@ -2845,13 +2905,29 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 61: // connection unblocked
- //fmt.Println("NextMethod: class:10 method:61")
+ // fmt.Println("NextMethod: class:10 method:61")
method := &connectionUnblocked{}
if err = method.read(r.r); err != nil {
return
}
mf.Method = method
+ case 70: // connection update-secret
+ // fmt.Println("NextMethod: class:10 method:70")
+ method := &connectionUpdateSecret{}
+ if err = method.read(r.r); err != nil {
+ return
+ }
+ mf.Method = method
+
+ case 71: // connection update-secret-ok
+ // fmt.Println("NextMethod: class:10 method:71")
+ method := &connectionUpdateSecretOk{}
+ if err = method.read(r.r); err != nil {
+ return
+ }
+ mf.Method = method
+
default:
return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId)
}
@@ -2860,7 +2936,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
switch mf.MethodId {
case 10: // channel open
- //fmt.Println("NextMethod: class:20 method:10")
+ // fmt.Println("NextMethod: class:20 method:10")
method := &channelOpen{}
if err = method.read(r.r); err != nil {
return
@@ -2868,7 +2944,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 11: // channel open-ok
- //fmt.Println("NextMethod: class:20 method:11")
+ // fmt.Println("NextMethod: class:20 method:11")
method := &channelOpenOk{}
if err = method.read(r.r); err != nil {
return
@@ -2876,7 +2952,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 20: // channel flow
- //fmt.Println("NextMethod: class:20 method:20")
+ // fmt.Println("NextMethod: class:20 method:20")
method := &channelFlow{}
if err = method.read(r.r); err != nil {
return
@@ -2884,7 +2960,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 21: // channel flow-ok
- //fmt.Println("NextMethod: class:20 method:21")
+ // fmt.Println("NextMethod: class:20 method:21")
method := &channelFlowOk{}
if err = method.read(r.r); err != nil {
return
@@ -2892,7 +2968,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 40: // channel close
- //fmt.Println("NextMethod: class:20 method:40")
+ // fmt.Println("NextMethod: class:20 method:40")
method := &channelClose{}
if err = method.read(r.r); err != nil {
return
@@ -2900,7 +2976,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 41: // channel close-ok
- //fmt.Println("NextMethod: class:20 method:41")
+ // fmt.Println("NextMethod: class:20 method:41")
method := &channelCloseOk{}
if err = method.read(r.r); err != nil {
return
@@ -2915,7 +2991,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
switch mf.MethodId {
case 10: // exchange declare
- //fmt.Println("NextMethod: class:40 method:10")
+ // fmt.Println("NextMethod: class:40 method:10")
method := &exchangeDeclare{}
if err = method.read(r.r); err != nil {
return
@@ -2923,7 +2999,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 11: // exchange declare-ok
- //fmt.Println("NextMethod: class:40 method:11")
+ // fmt.Println("NextMethod: class:40 method:11")
method := &exchangeDeclareOk{}
if err = method.read(r.r); err != nil {
return
@@ -2931,7 +3007,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 20: // exchange delete
- //fmt.Println("NextMethod: class:40 method:20")
+ // fmt.Println("NextMethod: class:40 method:20")
method := &exchangeDelete{}
if err = method.read(r.r); err != nil {
return
@@ -2939,7 +3015,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 21: // exchange delete-ok
- //fmt.Println("NextMethod: class:40 method:21")
+ // fmt.Println("NextMethod: class:40 method:21")
method := &exchangeDeleteOk{}
if err = method.read(r.r); err != nil {
return
@@ -2947,7 +3023,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 30: // exchange bind
- //fmt.Println("NextMethod: class:40 method:30")
+ // fmt.Println("NextMethod: class:40 method:30")
method := &exchangeBind{}
if err = method.read(r.r); err != nil {
return
@@ -2955,7 +3031,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 31: // exchange bind-ok
- //fmt.Println("NextMethod: class:40 method:31")
+ // fmt.Println("NextMethod: class:40 method:31")
method := &exchangeBindOk{}
if err = method.read(r.r); err != nil {
return
@@ -2963,7 +3039,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 40: // exchange unbind
- //fmt.Println("NextMethod: class:40 method:40")
+ // fmt.Println("NextMethod: class:40 method:40")
method := &exchangeUnbind{}
if err = method.read(r.r); err != nil {
return
@@ -2971,7 +3047,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 51: // exchange unbind-ok
- //fmt.Println("NextMethod: class:40 method:51")
+ // fmt.Println("NextMethod: class:40 method:51")
method := &exchangeUnbindOk{}
if err = method.read(r.r); err != nil {
return
@@ -2986,7 +3062,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
switch mf.MethodId {
case 10: // queue declare
- //fmt.Println("NextMethod: class:50 method:10")
+ // fmt.Println("NextMethod: class:50 method:10")
method := &queueDeclare{}
if err = method.read(r.r); err != nil {
return
@@ -2994,7 +3070,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 11: // queue declare-ok
- //fmt.Println("NextMethod: class:50 method:11")
+ // fmt.Println("NextMethod: class:50 method:11")
method := &queueDeclareOk{}
if err = method.read(r.r); err != nil {
return
@@ -3002,7 +3078,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 20: // queue bind
- //fmt.Println("NextMethod: class:50 method:20")
+ // fmt.Println("NextMethod: class:50 method:20")
method := &queueBind{}
if err = method.read(r.r); err != nil {
return
@@ -3010,7 +3086,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 21: // queue bind-ok
- //fmt.Println("NextMethod: class:50 method:21")
+ // fmt.Println("NextMethod: class:50 method:21")
method := &queueBindOk{}
if err = method.read(r.r); err != nil {
return
@@ -3018,7 +3094,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 50: // queue unbind
- //fmt.Println("NextMethod: class:50 method:50")
+ // fmt.Println("NextMethod: class:50 method:50")
method := &queueUnbind{}
if err = method.read(r.r); err != nil {
return
@@ -3026,7 +3102,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 51: // queue unbind-ok
- //fmt.Println("NextMethod: class:50 method:51")
+ // fmt.Println("NextMethod: class:50 method:51")
method := &queueUnbindOk{}
if err = method.read(r.r); err != nil {
return
@@ -3034,7 +3110,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 30: // queue purge
- //fmt.Println("NextMethod: class:50 method:30")
+ // fmt.Println("NextMethod: class:50 method:30")
method := &queuePurge{}
if err = method.read(r.r); err != nil {
return
@@ -3042,7 +3118,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 31: // queue purge-ok
- //fmt.Println("NextMethod: class:50 method:31")
+ // fmt.Println("NextMethod: class:50 method:31")
method := &queuePurgeOk{}
if err = method.read(r.r); err != nil {
return
@@ -3050,7 +3126,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 40: // queue delete
- //fmt.Println("NextMethod: class:50 method:40")
+ // fmt.Println("NextMethod: class:50 method:40")
method := &queueDelete{}
if err = method.read(r.r); err != nil {
return
@@ -3058,7 +3134,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 41: // queue delete-ok
- //fmt.Println("NextMethod: class:50 method:41")
+ // fmt.Println("NextMethod: class:50 method:41")
method := &queueDeleteOk{}
if err = method.read(r.r); err != nil {
return
@@ -3073,7 +3149,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
switch mf.MethodId {
case 10: // basic qos
- //fmt.Println("NextMethod: class:60 method:10")
+ // fmt.Println("NextMethod: class:60 method:10")
method := &basicQos{}
if err = method.read(r.r); err != nil {
return
@@ -3081,7 +3157,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 11: // basic qos-ok
- //fmt.Println("NextMethod: class:60 method:11")
+ // fmt.Println("NextMethod: class:60 method:11")
method := &basicQosOk{}
if err = method.read(r.r); err != nil {
return
@@ -3089,7 +3165,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 20: // basic consume
- //fmt.Println("NextMethod: class:60 method:20")
+ // fmt.Println("NextMethod: class:60 method:20")
method := &basicConsume{}
if err = method.read(r.r); err != nil {
return
@@ -3097,7 +3173,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 21: // basic consume-ok
- //fmt.Println("NextMethod: class:60 method:21")
+ // fmt.Println("NextMethod: class:60 method:21")
method := &basicConsumeOk{}
if err = method.read(r.r); err != nil {
return
@@ -3105,7 +3181,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 30: // basic cancel
- //fmt.Println("NextMethod: class:60 method:30")
+ // fmt.Println("NextMethod: class:60 method:30")
method := &basicCancel{}
if err = method.read(r.r); err != nil {
return
@@ -3113,7 +3189,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 31: // basic cancel-ok
- //fmt.Println("NextMethod: class:60 method:31")
+ // fmt.Println("NextMethod: class:60 method:31")
method := &basicCancelOk{}
if err = method.read(r.r); err != nil {
return
@@ -3121,7 +3197,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 40: // basic publish
- //fmt.Println("NextMethod: class:60 method:40")
+ // fmt.Println("NextMethod: class:60 method:40")
method := &basicPublish{}
if err = method.read(r.r); err != nil {
return
@@ -3129,7 +3205,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 50: // basic return
- //fmt.Println("NextMethod: class:60 method:50")
+ // fmt.Println("NextMethod: class:60 method:50")
method := &basicReturn{}
if err = method.read(r.r); err != nil {
return
@@ -3137,7 +3213,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 60: // basic deliver
- //fmt.Println("NextMethod: class:60 method:60")
+ // fmt.Println("NextMethod: class:60 method:60")
method := &basicDeliver{}
if err = method.read(r.r); err != nil {
return
@@ -3145,7 +3221,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 70: // basic get
- //fmt.Println("NextMethod: class:60 method:70")
+ // fmt.Println("NextMethod: class:60 method:70")
method := &basicGet{}
if err = method.read(r.r); err != nil {
return
@@ -3153,7 +3229,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 71: // basic get-ok
- //fmt.Println("NextMethod: class:60 method:71")
+ // fmt.Println("NextMethod: class:60 method:71")
method := &basicGetOk{}
if err = method.read(r.r); err != nil {
return
@@ -3161,7 +3237,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 72: // basic get-empty
- //fmt.Println("NextMethod: class:60 method:72")
+ // fmt.Println("NextMethod: class:60 method:72")
method := &basicGetEmpty{}
if err = method.read(r.r); err != nil {
return
@@ -3169,7 +3245,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 80: // basic ack
- //fmt.Println("NextMethod: class:60 method:80")
+ // fmt.Println("NextMethod: class:60 method:80")
method := &basicAck{}
if err = method.read(r.r); err != nil {
return
@@ -3177,7 +3253,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 90: // basic reject
- //fmt.Println("NextMethod: class:60 method:90")
+ // fmt.Println("NextMethod: class:60 method:90")
method := &basicReject{}
if err = method.read(r.r); err != nil {
return
@@ -3185,7 +3261,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 100: // basic recover-async
- //fmt.Println("NextMethod: class:60 method:100")
+ // fmt.Println("NextMethod: class:60 method:100")
method := &basicRecoverAsync{}
if err = method.read(r.r); err != nil {
return
@@ -3193,7 +3269,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 110: // basic recover
- //fmt.Println("NextMethod: class:60 method:110")
+ // fmt.Println("NextMethod: class:60 method:110")
method := &basicRecover{}
if err = method.read(r.r); err != nil {
return
@@ -3201,7 +3277,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 111: // basic recover-ok
- //fmt.Println("NextMethod: class:60 method:111")
+ // fmt.Println("NextMethod: class:60 method:111")
method := &basicRecoverOk{}
if err = method.read(r.r); err != nil {
return
@@ -3209,7 +3285,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 120: // basic nack
- //fmt.Println("NextMethod: class:60 method:120")
+ // fmt.Println("NextMethod: class:60 method:120")
method := &basicNack{}
if err = method.read(r.r); err != nil {
return
@@ -3224,7 +3300,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
switch mf.MethodId {
case 10: // tx select
- //fmt.Println("NextMethod: class:90 method:10")
+ // fmt.Println("NextMethod: class:90 method:10")
method := &txSelect{}
if err = method.read(r.r); err != nil {
return
@@ -3232,7 +3308,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 11: // tx select-ok
- //fmt.Println("NextMethod: class:90 method:11")
+ // fmt.Println("NextMethod: class:90 method:11")
method := &txSelectOk{}
if err = method.read(r.r); err != nil {
return
@@ -3240,7 +3316,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 20: // tx commit
- //fmt.Println("NextMethod: class:90 method:20")
+ // fmt.Println("NextMethod: class:90 method:20")
method := &txCommit{}
if err = method.read(r.r); err != nil {
return
@@ -3248,7 +3324,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 21: // tx commit-ok
- //fmt.Println("NextMethod: class:90 method:21")
+ // fmt.Println("NextMethod: class:90 method:21")
method := &txCommitOk{}
if err = method.read(r.r); err != nil {
return
@@ -3256,7 +3332,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 30: // tx rollback
- //fmt.Println("NextMethod: class:90 method:30")
+ // fmt.Println("NextMethod: class:90 method:30")
method := &txRollback{}
if err = method.read(r.r); err != nil {
return
@@ -3264,7 +3340,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 31: // tx rollback-ok
- //fmt.Println("NextMethod: class:90 method:31")
+ // fmt.Println("NextMethod: class:90 method:31")
method := &txRollbackOk{}
if err = method.read(r.r); err != nil {
return
@@ -3279,7 +3355,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
switch mf.MethodId {
case 10: // confirm select
- //fmt.Println("NextMethod: class:85 method:10")
+ // fmt.Println("NextMethod: class:85 method:10")
method := &confirmSelect{}
if err = method.read(r.r); err != nil {
return
@@ -3287,7 +3363,7 @@ func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err err
mf.Method = method
case 11: // confirm select-ok
- //fmt.Println("NextMethod: class:85 method:11")
+ // fmt.Println("NextMethod: class:85 method:11")
method := &confirmSelectOk{}
if err = method.read(r.r); err != nil {
return
diff --git a/vendor/github.com/rabbitmq/amqp091-go/types.go b/vendor/github.com/rabbitmq/amqp091-go/types.go
new file mode 100644
index 0000000000..8f43a726f8
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/types.go
@@ -0,0 +1,535 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amqp091
+
+import (
+ "fmt"
+ "io"
+ "time"
+)
+
+// DefaultExchange is the default direct exchange that binds every queue by its
+// name. Applications can route to a queue using the queue name as routing key.
+const DefaultExchange = ""
+
+// Constants for standard AMQP 0-9-1 exchange types.
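+//
+// For example, declaring a durable fanout exchange (a sketch; the exchange
+// name is illustrative):
+//
+//	err := ch.ExchangeDeclare("logs", amqp.ExchangeFanout, true, false, false, false, nil)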
+const (
+ ExchangeDirect = "direct"
+ ExchangeFanout = "fanout"
+ ExchangeTopic = "topic"
+ ExchangeHeaders = "headers"
+)
+
+var (
+ // ErrClosed is returned when the channel or connection is not open
+ ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"}
+
+ // ErrChannelMax is returned when Connection.Channel has been called enough
+ // times that all channel IDs have been exhausted in the client or the
+ // server.
+ ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"}
+
+ // ErrSASL is returned from Dial when the authentication mechanism could not
+ // be negotiated.
+ ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"}
+
+ // ErrCredentials is returned when the authenticated client is not authorized
+ // to any vhost.
+ ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"}
+
+ // ErrVhost is returned when the authenticated user is not permitted to
+ // access the requested Vhost.
+ ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"}
+
+	// ErrSyntax is a hard protocol error, indicating an unsupported protocol,
+ // implementation or encoding.
+ ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"}
+
+ // ErrFrame is returned when the protocol frame cannot be read from the
+ // server, indicating an unsupported protocol or unsupported frame type.
+ ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"}
+
+ // ErrCommandInvalid is returned when the server sends an unexpected response
+ // to this requested message type. This indicates a bug in this client.
+ ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"}
+
+ // ErrUnexpectedFrame is returned when something other than a method or
+ // heartbeat frame is delivered to the Connection, indicating a bug in the
+ // client.
+ ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"}
+
+ // ErrFieldType is returned when writing a message containing a Go type unsupported by AMQP.
+ ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"}
+)
+
+// internal errors used inside the library
+var (
+ errInvalidTypeAssertion = &Error{Code: InternalError, Reason: "type assertion unsuccessful", Server: false, Recover: true}
+)
+
+// Error captures the code and reason a channel or connection has been closed
+// by the server.
+type Error struct {
+ Code int // constant code from the specification
+ Reason string // description of the error
+ Server bool // true when initiated from the server, false when from this library
+ Recover bool // true when this error can be recovered by retrying later or with different parameters
+}
+
+func newError(code uint16, text string) *Error {
+ return &Error{
+ Code: int(code),
+ Reason: text,
+ Recover: isSoftExceptionCode(int(code)),
+ Server: true,
+ }
+}
+
+func (e Error) Error() string {
+ return fmt.Sprintf("Exception (%d) Reason: %q", e.Code, e.Reason)
+}
+
+// Used by header frames to capture routing and header information
+type properties struct {
+ ContentType string // MIME content type
+ ContentEncoding string // MIME content encoding
+ Headers Table // Application or header exchange table
+ DeliveryMode uint8 // queue implementation use - Transient (1) or Persistent (2)
+ Priority uint8 // queue implementation use - 0 to 9
+ CorrelationId string // application use - correlation identifier
+	ReplyTo         string    // application use - address to reply to (ex: RPC)
+ Expiration string // implementation use - message expiration spec
+ MessageId string // application use - message identifier
+ Timestamp time.Time // application use - message timestamp
+ Type string // application use - message type name
+ UserId string // application use - creating user id
+ AppId string // application use - creating application
+ reserved1 string // was cluster-id - process for buffer consumption
+}
+
+// DeliveryMode. Transient means higher throughput but messages will not be
+// restored on broker restart. The delivery mode of publishings is unrelated
+// to the durability of the queues they reside on. Transient messages will
+// not be restored to durable queues, persistent messages will be restored to
+// durable queues and lost on non-durable queues during server restart.
+//
+// This remains typed as uint8 to match Publishing.DeliveryMode. Other
+// delivery modes specific to custom queue implementations are not enumerated
+// here.
+const (
+ Transient uint8 = 1
+ Persistent uint8 = 2
+)
+
+// The property flags are an array of bits that indicate the presence or
+// absence of each property value in sequence. The bits are ordered from
+// high to low - bit 15 indicates the first property.
+const (
+ flagContentType = 0x8000
+ flagContentEncoding = 0x4000
+ flagHeaders = 0x2000
+ flagDeliveryMode = 0x1000
+ flagPriority = 0x0800
+ flagCorrelationId = 0x0400
+ flagReplyTo = 0x0200
+ flagExpiration = 0x0100
+ flagMessageId = 0x0080
+ flagTimestamp = 0x0040
+ flagType = 0x0020
+ flagUserId = 0x0010
+ flagAppId = 0x0008
+ flagReserved1 = 0x0004
+)
+
+// Queue captures the current server state of the queue on the server, as
+// returned from Channel.QueueDeclare or Channel.QueueInspect.
+type Queue struct {
+ Name string // server confirmed or generated name
+ Messages int // count of messages not awaiting acknowledgment
+ Consumers int // number of consumers receiving deliveries
+}
+
+// Publishing captures the client message sent to the server. The fields
+// outside of the Headers table included in this struct mirror the underlying
+// fields in the content frame. They use native types for convenience and
+// efficiency.
+type Publishing struct {
+ // Application or exchange specific fields,
+ // the headers exchange will inspect this field.
+ Headers Table
+
+ // Properties
+ ContentType string // MIME content type
+ ContentEncoding string // MIME content encoding
+ DeliveryMode uint8 // Transient (0 or 1) or Persistent (2)
+ Priority uint8 // 0 to 9
+ CorrelationId string // correlation identifier
+	ReplyTo         string    // address to reply to (ex: RPC)
+ Expiration string // message expiration spec
+ MessageId string // message identifier
+ Timestamp time.Time // message timestamp
+ Type string // message type name
+ UserId string // creating user id - ex: "guest"
+ AppId string // creating application id
+
+ // The application specific payload of the message
+ Body []byte
+}
+
+// Blocking notifies the server's TCP flow control of the Connection. When a
+// server hits a memory or disk alarm it will block all connections until the
+// resources are reclaimed. Use NotifyBlock on the Connection to receive these
+// events.
+type Blocking struct {
+ Active bool // TCP pushback active/inactive on server
+ Reason string // Server reason for activation
+}
+
+// DeferredConfirmation represents a future publisher confirm for a message. It
+// allows users to directly correlate a publishing to a confirmation. These are
+// returned from PublishWithDeferredConfirm on Channels.
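+//
+// A sketch (assuming the channel is in confirm mode; Wait blocks until the
+// broker acks or nacks the publishing):
+//
+//	dc, err := ch.PublishWithDeferredConfirm("", q.Name, false, false, pub)
+//	if err == nil && dc.Wait() {
+//		// the broker acknowledged this publishing
+//	}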
+type DeferredConfirmation struct {
+ DeliveryTag uint64
+
+ done chan struct{}
+ ack bool
+}
+
+// Confirmation notifies the acknowledgment or negative acknowledgement of a
+// publishing identified by its delivery tag. Use NotifyPublish on the Channel
+// to consume these events.
+type Confirmation struct {
+	DeliveryTag uint64 // A 1-based counter of publishings from when the channel was put in Confirm mode
+ Ack bool // True when the server successfully received the publishing
+}
+
+// Decimal matches the AMQP decimal type. Scale is the number of decimal
+// digits: with Scale == 2 and Value == 12345, the decimal is 123.45.
+type Decimal struct {
+ Scale uint8
+ Value int32
+}
+
+// Most common queue argument keys in queue declaration. For a comprehensive list
+// of queue arguments, visit [RabbitMQ Queue docs].
+//
+// [QueueTypeArg] queue argument is used to declare quorum and stream queues.
+// Accepted values are [QueueTypeClassic] (default), [QueueTypeQuorum] and
+// [QueueTypeStream]. [Quorum Queues] accept (almost) all the same queue
+// arguments as their Classic Queue counterparts. Check the
+// [feature comparison] docs for more
+// information.
+//
+// Queues can define their [max length] using [QueueMaxLenArg] and
+// [QueueMaxLenBytesArg] queue arguments. Overflow behaviour is set using
+// [QueueOverflowArg]. Accepted values are [QueueOverflowDropHead] (default),
+// [QueueOverflowRejectPublish] and [QueueOverflowRejectPublishDLX].
+//
+// [Queue TTL] can be defined using [QueueTTLArg]. That is, the time-to-live for an
+// unused queue. [Queue Message TTL] can be defined using [QueueMessageTTLArg].
+// This will set a time-to-live for messages in the queue.
+//
+// [Stream retention] can be configured using [StreamMaxLenBytesArg], to set the
+// maximum size of the stream. Please note that stream queues always keep, at
+// least, one segment. [Stream retention] can also be set using [StreamMaxAgeArg],
+// to set time-based retention. Values are strings with a unit suffix. Valid
+// suffixes are Y, M, D, h, m, s, e.g. "7D" for one week. The maximum segment
+// size can be set using [StreamMaxSegmentSizeBytesArg]. The default value is
+// 500_000_000 bytes, i.e. about 500 megabytes.
+//
+// Starting with RabbitMQ 3.12, consumer timeout can be configured as a queue
+// argument. This is the timeout for a consumer to acknowledge a message. The
+// value is the time in milliseconds. The timeout is evaluated periodically,
+// at one minute intervals. Values lower than one minute are not supported.
+// See the [consumer timeout] guide for more information.
+//
+// [Single Active Consumer] on quorum and classic queues can be configured
+// using [SingleActiveConsumerArg]. This argument expects a boolean value. It is
+// false by default.
+//
+// [RabbitMQ Queue docs]: https://rabbitmq.com/queues.html
+// [Stream retention]: https://rabbitmq.com/streams.html#retention
+// [max length]: https://rabbitmq.com/maxlength.html
+// [Queue TTL]: https://rabbitmq.com/ttl.html#queue-ttl
+// [Queue Message TTL]: https://rabbitmq.com/ttl.html#per-queue-message-ttl
+// [Quorum Queues]: https://rabbitmq.com/quorum-queues.html
+// [feature comparison]: https://rabbitmq.com/quorum-queues.html#feature-comparison
+// [consumer timeout]: https://rabbitmq.com/consumers.html#acknowledgement-timeout
+// [Single Active Consumer]: https://rabbitmq.com/consumers.html#single-active-consumer
+const (
+ QueueTypeArg = "x-queue-type"
+ QueueMaxLenArg = "x-max-length"
+ QueueMaxLenBytesArg = "x-max-length-bytes"
+ StreamMaxLenBytesArg = "x-max-length-bytes"
+ QueueOverflowArg = "x-overflow"
+ QueueMessageTTLArg = "x-message-ttl"
+ QueueTTLArg = "x-expires"
+ StreamMaxAgeArg = "x-max-age"
+ StreamMaxSegmentSizeBytesArg = "x-stream-max-segment-size-bytes"
+ // QueueVersionArg declares the Classic Queue version to use. Expects an integer, either 1 or 2.
+ QueueVersionArg = "x-queue-version"
+ // ConsumerTimeoutArg is available in RabbitMQ 3.12+ as a queue argument.
+ ConsumerTimeoutArg = "x-consumer-timeout"
+ SingleActiveConsumerArg = "x-single-active-consumer"
+)
+
+// Values for queue arguments. Use as values for queue arguments during queue declaration.
+// The following argument table will create a classic queue, with max length set to 100 messages,
+// and a queue TTL of 30 minutes.
+//
+// args := amqp.Table{
+// amqp.QueueTypeArg: QueueTypeClassic,
+// amqp.QueueMaxLenArg: 100,
+// amqp.QueueTTLArg: 1800000,
+// }
+//
+// Refer to [Channel.QueueDeclare] for more examples.
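+//
+// A sketch of passing such a table at declaration time (queue name and flags
+// are illustrative):
+//
+//	q, err := ch.QueueDeclare("jobs", true, false, false, false, args)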
+const (
+ QueueTypeClassic = "classic"
+ QueueTypeQuorum = "quorum"
+ QueueTypeStream = "stream"
+ QueueOverflowDropHead = "drop-head"
+ QueueOverflowRejectPublish = "reject-publish"
+ QueueOverflowRejectPublishDLX = "reject-publish-dlx"
+)
+
+// Table stores user supplied fields of the following types:
+//
+// bool
+// byte
+// int8
+// float32
+// float64
+// int
+// int16
+// int32
+// int64
+// nil
+// string
+// time.Time
+// amqp.Decimal
+// amqp.Table
+// []byte
+// []interface{} - containing above types
+//
+// Functions taking a table will immediately fail when the table contains a
+// value of an unsupported type.
+//
+// The caller must be specific about which precision of integer it wishes to
+// encode.
+//
+// Use a type assertion when reading values from a table for type conversion.
+//
+// RabbitMQ expects int32 for integer values.
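+//
+// A sketch (field names and values are illustrative):
+//
+//	headers := amqp.Table{
+//		"retries": int32(3),
+//		"source":  "billing",
+//	}
+//	if n, ok := headers["retries"].(int32); ok {
+//		_ = n // use the typed value
+//	}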
+type Table map[string]interface{}
+
+func validateField(f interface{}) error {
+ switch fv := f.(type) {
+ case nil, bool, byte, int8, int, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time:
+ return nil
+
+ case []interface{}:
+ for _, v := range fv {
+ if err := validateField(v); err != nil {
+ return fmt.Errorf("in array %s", err)
+ }
+ }
+ return nil
+
+ case Table:
+ for k, v := range fv {
+ if err := validateField(v); err != nil {
+ return fmt.Errorf("table field %q %s", k, err)
+ }
+ }
+ return nil
+ }
+
+ return fmt.Errorf("value %T not supported", f)
+}
+
+// Validate returns an error if any Go types in the table are incompatible with AMQP types.
+func (t Table) Validate() error {
+ return validateField(t)
+}
+
+// SetClientConnectionName sets the connection name property. This property
+// can be used in amqp.Config to set a custom connection name during
+// amqp.DialConfig(). This can be helpful to identify specific connections
+// in RabbitMQ, for debugging or tracing purposes.
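+//
+// A sketch (the URL and connection name are illustrative):
+//
+//	props := amqp.Table{}
+//	props.SetClientConnectionName("inventory-service")
+//	conn, err := amqp.DialConfig("amqp://guest:guest@localhost:5672/", amqp.Config{Properties: props})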
+func (t Table) SetClientConnectionName(connName string) {
+ t["connection_name"] = connName
+}
+
+type message interface {
+ id() (uint16, uint16)
+ wait() bool
+ read(io.Reader) error
+ write(io.Writer) error
+}
+
+type messageWithContent interface {
+ message
+ getContent() (properties, []byte)
+ setContent(properties, []byte)
+}
+
+/*
+The base interface implemented as:
+
+2.3.5 frame Details
+
+All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects
+malformed frames:
+
+ 0 1 3 7 size+7 size+8
+ +------+---------+-------------+ +------------+ +-----------+
+ | type | channel | size | | payload | | frame-end |
+ +------+---------+-------------+ +------------+ +-----------+
+ octet short long size octets octet
+
+To read a frame, we:
+
+ 1. Read the header and check the frame type and channel.
+ 2. Depending on the frame type, we read the payload and process it.
+ 3. Read the frame end octet.
+
+In realistic implementations where performance is a concern, we would use
+“read-ahead buffering” or “gathering reads” to avoid doing three separate
+system calls to read a frame.
+*/
+type frame interface {
+ write(io.Writer) error
+ channel() uint16
+}
+
+/*
+Perform any updates on the channel immediately after the frame is decoded while the
+connection mutex is held.
+*/
+func updateChannel(f frame, channel *Channel) {
+ if mf, isMethodFrame := f.(*methodFrame); isMethodFrame {
+ if _, isChannelClose := mf.Method.(*channelClose); isChannelClose {
+ channel.setClosed()
+ }
+ }
+}
+
+type reader struct {
+ r io.Reader
+}
+
+type writer struct {
+ w io.Writer
+}
+
+// Implements the frame interface for Connection RPC
+type protocolHeader struct{}
+
+func (protocolHeader) write(w io.Writer) error {
+ _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1})
+ return err
+}
+
+func (protocolHeader) channel() uint16 {
+ panic("only valid as initial handshake")
+}
+
+/*
+Method frames carry the high-level protocol commands (which we call "methods").
+One method frame carries one command. The method frame payload has this format:
+
+ 0 2 4
+ +----------+-----------+-------------- - -
+ | class-id | method-id | arguments...
+ +----------+-----------+-------------- - -
+ short short ...
+
+To process a method frame, we:
+ 1. Read the method frame payload.
+ 2. Unpack it into a structure. A given method always has the same structure,
+    so we can unpack the method rapidly.
+ 3. Check that the method is allowed in the current context.
+ 4. Check that the method arguments are valid.
+ 5. Execute the method.
+
+Method frame bodies are constructed as a list of AMQP data fields (bits,
+integers, strings and string tables). The marshalling code is trivially
+generated directly from the protocol specifications, and can be very rapid.
+*/
+type methodFrame struct {
+ ChannelId uint16
+ ClassId uint16
+ MethodId uint16
+ Method message
+}
+
+func (f *methodFrame) channel() uint16 { return f.ChannelId }
+
+/*
+Heartbeating is a technique designed to undo one of TCP/IP's features, namely
+its ability to recover from a broken physical connection by closing only after
+a quite long time-out. In some scenarios we need to know very rapidly if a
+peer is disconnected or not responding for other reasons (e.g. it is looping).
+Since heartbeating can be done at a low level, we implement this as a special
+type of frame that peers exchange at the transport level, rather than as a
+class method.
+*/
+type heartbeatFrame struct {
+ ChannelId uint16
+}
+
+func (f *heartbeatFrame) channel() uint16 { return f.ChannelId }
+
+/*
+Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally
+defined as carrying content. When a peer sends such a method frame, it always
+follows it with a content header and zero or more content body frames.
+
+A content header frame has this format:
+
+ 0 2 4 12 14
+ +----------+--------+-----------+----------------+------------- - -
+ | class-id | weight | body size | property flags | property list...
+ +----------+--------+-----------+----------------+------------- - -
+ short short long long short remainder...
+
+We place content body in distinct frames (rather than including it in the
+method) so that AMQP may support "zero copy" techniques in which content is
+never marshalled or encoded. We place the content properties in their own
+frame so that recipients can selectively discard contents they do not want to
+process.
+*/
+type headerFrame struct {
+ ChannelId uint16
+ ClassId uint16
+ weight uint16
+ Size uint64
+ Properties properties
+}
+
+func (f *headerFrame) channel() uint16 { return f.ChannelId }
+
+/*
+Content is the application data we carry from client-to-client via the AMQP
+server. Content is, roughly speaking, a set of properties plus a binary data
+part. The set of allowed properties are defined by the Basic class, and these
+form the "content header frame". The data can be any size, and MAY be broken
+into several (or many) chunks, each forming a "content body frame".
+
+Looking at the frames for a specific channel, as they pass on the wire, we
+might see something like this:
+
+ [method]
+ [method] [header] [body] [body]
+ [method]
+ ...
+*/
+type bodyFrame struct {
+ ChannelId uint16
+ Body []byte
+}
+
+func (f *bodyFrame) channel() uint16 { return f.ChannelId }
diff --git a/vendor/github.com/rabbitmq/amqp091-go/uri.go b/vendor/github.com/rabbitmq/amqp091-go/uri.go
new file mode 100644
index 0000000000..87ef09e6fe
--- /dev/null
+++ b/vendor/github.com/rabbitmq/amqp091-go/uri.go
@@ -0,0 +1,196 @@
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amqp091
+
+import (
+ "errors"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'")
+var errURIWhitespace = errors.New("URI must not contain whitespace")
+
+var schemePorts = map[string]int{
+ "amqp": 5672,
+ "amqps": 5671,
+}
+
+var defaultURI = URI{
+ Scheme: "amqp",
+ Host: "localhost",
+ Port: 5672,
+ Username: "guest",
+ Password: "guest",
+ Vhost: "/",
+}
+
+// URI represents a parsed AMQP URI string.
+type URI struct {
+ Scheme string
+ Host string
+ Port int
+ Username string
+ Password string
+ Vhost string
+ CertFile string // client TLS auth - path to certificate (PEM)
+ CACertFile string // client TLS auth - path to CA certificate (PEM)
+ KeyFile string // client TLS auth - path to private key (PEM)
+ ServerName string // client TLS auth - server name
+}
+
+// ParseURI attempts to parse the given AMQP URI according to the spec.
+// See http://www.rabbitmq.com/uri-spec.html.
+//
+// Default values for the fields are:
+//
+// Scheme: amqp
+// Host: localhost
+// Port: 5672
+// Username: guest
+// Password: guest
+// Vhost: /
+//
+// Supports TLS query parameters. See https://www.rabbitmq.com/uri-query-parameters.html
+//
+//	certfile: <path/to/client_cert.pem>
+//	keyfile: <path/to/client_key.pem>
+//	cacertfile: <path/to/ca.pem>
+//	server_name_indication: <server name>
+//
+// If cacertfile is not provided, system CA certificates will be used.
+// Mutual TLS (client auth) will be enabled only when both keyfile AND certfile are provided.
+//
+// If Config.TLSClientConfig is set, TLS parameters from URI will be ignored.
+func ParseURI(uri string) (URI, error) {
+ builder := defaultURI
+
+ if strings.Contains(uri, " ") {
+ return builder, errURIWhitespace
+ }
+
+ u, err := url.Parse(uri)
+ if err != nil {
+ return builder, err
+ }
+
+ defaultPort, okScheme := schemePorts[u.Scheme]
+
+ if okScheme {
+ builder.Scheme = u.Scheme
+ } else {
+ return builder, errURIScheme
+ }
+
+ host := u.Hostname()
+ port := u.Port()
+
+ if host != "" {
+ builder.Host = host
+ }
+
+ if port != "" {
+ port32, err := strconv.ParseInt(port, 10, 32)
+ if err != nil {
+ return builder, err
+ }
+ builder.Port = int(port32)
+ } else {
+ builder.Port = defaultPort
+ }
+
+ if u.User != nil {
+ builder.Username = u.User.Username()
+ if password, ok := u.User.Password(); ok {
+ builder.Password = password
+ }
+ }
+
+ if u.Path != "" {
+ if strings.HasPrefix(u.Path, "/") {
+ if u.Host == "" && strings.HasPrefix(u.Path, "///") {
+ // net/url doesn't handle local context authorities and leaves that up
+ // to the scheme handler. In our case, we translate amqp:/// into the
+ // default host and whatever the vhost should be
+ if len(u.Path) > 3 {
+ builder.Vhost = u.Path[3:]
+ }
+ } else if len(u.Path) > 1 {
+ builder.Vhost = u.Path[1:]
+ }
+ } else {
+ builder.Vhost = u.Path
+ }
+ }
+
+ // see https://www.rabbitmq.com/uri-query-parameters.html
+ params := u.Query()
+ builder.CertFile = params.Get("certfile")
+ builder.KeyFile = params.Get("keyfile")
+ builder.CACertFile = params.Get("cacertfile")
+ builder.ServerName = params.Get("server_name_indication")
+
+ return builder, nil
+}
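
A hedged usage sketch of ParseURI as documented above; the broker address, credentials, and certificate path are placeholders:

```go
package main

import (
	"fmt"
	"log"

	amqp "github.com/rabbitmq/amqp091-go"
)

func main() {
	// Hypothetical amqps broker with a custom vhost and a CA cert path.
	uri, err := amqp.ParseURI("amqps://user:secret@broker.example.com/orders?cacertfile=/etc/ssl/ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(uri.Scheme, uri.Host, uri.Port) // amqps broker.example.com 5671
	fmt.Println(uri.Vhost, uri.CACertFile)      // orders /etc/ssl/ca.pem
}
```

Note how the port falls back to the scheme default (5671 for amqps) when the URI omits it.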
+
+// PlainAuth returns a PlainAuth structure based on the parsed URI's
+// Username and Password fields.
+func (uri URI) PlainAuth() *PlainAuth {
+ return &PlainAuth{
+ Username: uri.Username,
+ Password: uri.Password,
+ }
+}
+
+// AMQPlainAuth returns a PlainAuth structure based on the parsed URI's
+// Username and Password fields.
+func (uri URI) AMQPlainAuth() *AMQPlainAuth {
+ return &AMQPlainAuth{
+ Username: uri.Username,
+ Password: uri.Password,
+ }
+}
+
+func (uri URI) String() string {
+ authority, err := url.Parse("")
+ if err != nil {
+ return err.Error()
+ }
+
+ authority.Scheme = uri.Scheme
+
+ if uri.Username != defaultURI.Username || uri.Password != defaultURI.Password {
+ authority.User = url.User(uri.Username)
+
+ if uri.Password != defaultURI.Password {
+ authority.User = url.UserPassword(uri.Username, uri.Password)
+ }
+ }
+
+ if defaultPort, found := schemePorts[uri.Scheme]; !found || defaultPort != uri.Port {
+ authority.Host = net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port))
+ } else {
+		// JoinHostPort() automatically adds brackets to the host if it's
+		// an IPv6 address.
+		//
+		// If no port is specified, JoinHostPort() returns an address in the
+		// form of "[::1]:", so we use TrimSuffix() to remove the extra ":".
+ authority.Host = strings.TrimSuffix(net.JoinHostPort(uri.Host, ""), ":")
+ }
+
+ if uri.Vhost != defaultURI.Vhost {
+ // Make sure net/url does not double escape, e.g.
+ // "%2F" does not become "%252F".
+ authority.Path = uri.Vhost
+ authority.RawPath = url.QueryEscape(uri.Vhost)
+ } else {
+ authority.Path = "/"
+ }
+
+ return authority.String()
+}
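
Assuming the defaults and escaping logic above, a round trip should omit the guest credentials and the default port while percent-encoding a vhost that contains a slash; a sketch:

```go
package main

import (
	"fmt"

	amqp "github.com/rabbitmq/amqp091-go"
)

func main() {
	u, _ := amqp.ParseURI("amqp://guest:guest@localhost:5672/a/b")
	// guest:guest and port 5672 match the defaults, so both are dropped;
	// the vhost "a/b" is escaped so its slash survives a re-parse.
	fmt.Println(u.String()) // amqp://localhost/a%2Fb
}
```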
diff --git a/vendor/github.com/streadway/amqp/write.go b/vendor/github.com/rabbitmq/amqp091-go/write.go
similarity index 94%
rename from vendor/github.com/streadway/amqp/write.go
rename to vendor/github.com/rabbitmq/amqp091-go/write.go
index 94a46d115e..dcec314489 100644
--- a/vendor/github.com/streadway/amqp/write.go
+++ b/vendor/github.com/rabbitmq/amqp091-go/write.go
@@ -1,9 +1,9 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved.
+// Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-package amqp
+package amqp091
import (
"bufio"
@@ -15,6 +15,11 @@ import (
"time"
)
+func (w *writer) WriteFrameNoFlush(frame frame) (err error) {
+ err = frame.write(w.w)
+ return
+}
+
func (w *writer) WriteFrame(frame frame) (err error) {
if err = frame.write(w.w); err != nil {
return
@@ -63,11 +68,10 @@ func (f *heartbeatFrame) write(w io.Writer) (err error) {
// +----------+--------+-----------+----------------+------------- - -
// | class-id | weight | body size | property flags | property list...
// +----------+--------+-----------+----------------+------------- - -
-// short short long long short remainder...
//
+// short short long long short remainder...
func (f *headerFrame) write(w io.Writer) (err error) {
var payload bytes.Buffer
- var zeroTime time.Time
if err = binary.Write(&payload, binary.BigEndian, f.ClassId); err != nil {
return
@@ -113,7 +117,7 @@ func (f *headerFrame) write(w io.Writer) (err error) {
if len(f.Properties.MessageId) > 0 {
mask = mask | flagMessageId
}
- if f.Properties.Timestamp != zeroTime {
+ if !f.Properties.Timestamp.IsZero() {
mask = mask | flagTimestamp
}
if len(f.Properties.Type) > 0 {
@@ -212,7 +216,7 @@ func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err err
size := uint(len(payload))
_, err = w.Write([]byte{
- byte(typ),
+ typ,
byte((channel & 0xff00) >> 8),
byte((channel & 0x00ff) >> 0),
byte((size & 0xff000000) >> 24),
@@ -276,7 +280,8 @@ func writeLongstr(w io.Writer, s string) (err error) {
'S': string
'T': time.Time
'V': nil
-'b': byte
+'b': int8
+'B': byte
'd': float64
'f': float32
'l': int64
@@ -299,8 +304,13 @@ func writeField(w io.Writer, value interface{}) (err error) {
enc = buf[:2]
case byte:
+ buf[0] = 'B'
+ buf[1] = v
+ enc = buf[:2]
+
+ case int8:
buf[0] = 'b'
- buf[1] = byte(v)
+ buf[1] = uint8(v)
enc = buf[:2]
case int16:
@@ -335,7 +345,7 @@ func writeField(w io.Writer, value interface{}) (err error) {
case Decimal:
buf[0] = 'D'
- buf[1] = byte(v.Scale)
+ buf[1] = v.Scale
binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value))
enc = buf[:6]
@@ -412,5 +422,5 @@ func writeTable(w io.Writer, table Table) (err error) {
}
}
- return writeLongstr(w, string(buf.Bytes()))
+ return writeLongstr(w, buf.String())
}
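
The hunk above splits the one-octet field tags so that 'B' carries an unsigned byte and 'b' a signed int8. A standalone sketch of that tag-then-value encoding (not the package's unexported writer):

```go
package main

import (
	"bytes"
	"fmt"
)

// encodeOctetField writes a one-octet field-table value in AMQP's
// tag-then-value form: 'B' for unsigned octets, 'b' for signed ones.
func encodeOctetField(buf *bytes.Buffer, v interface{}) error {
	switch v := v.(type) {
	case byte:
		buf.WriteByte('B')
		buf.WriteByte(v)
	case int8:
		buf.WriteByte('b')
		buf.WriteByte(uint8(v))
	default:
		return fmt.Errorf("unsupported field type %T", v)
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	_ = encodeOctetField(&buf, byte(7))
	_ = encodeOctetField(&buf, int8(-7))
	fmt.Printf("% x\n", buf.Bytes()) // 42 07 62 f9
}
```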
diff --git a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/LICENSE b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/LICENSE
new file mode 100644
index 0000000000..f4967dbc5c
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/extra/rediscmd/rediscmd.go b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go
similarity index 98%
rename from vendor/github.com/go-redis/redis/extra/rediscmd/rediscmd.go
rename to vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go
index 12ea39fc1e..c97689f95c 100644
--- a/vendor/github.com/go-redis/redis/extra/rediscmd/rediscmd.go
+++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go
@@ -7,7 +7,7 @@ import (
"strings"
"time"
- "github.com/go-redis/redis/v8"
+ "github.com/redis/go-redis/v9"
)
func CmdString(cmd redis.Cmder) string {
diff --git a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/safe.go b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/safe.go
new file mode 100644
index 0000000000..6d3a8b7a29
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/safe.go
@@ -0,0 +1,12 @@
+//go:build appengine
+// +build appengine
+
+package rediscmd
+
+func String(b []byte) string {
+ return string(b)
+}
+
+func Bytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/unsafe.go b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/unsafe.go
new file mode 100644
index 0000000000..7ccdf2fef2
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/unsafe.go
@@ -0,0 +1,21 @@
+//go:build !appengine
+// +build !appengine
+
+package rediscmd
+
+import "unsafe"
+
+// String converts byte slice to string.
+func String(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// Bytes converts string to byte slice.
+func Bytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
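
A usage note on the zero-copy helpers above: the result aliases the input's memory, so the input must not be mutated afterwards. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/redis/go-redis/extra/rediscmd/v9"
)

func main() {
	b := []byte("GET")
	s := rediscmd.String(b) // zero-copy view over b's memory
	fmt.Println(s)          // GET
	// b[0] = 'S'           // would silently change s too; don't do this.
}
```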
diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/LICENSE b/vendor/github.com/redis/go-redis/extra/redisotel/v9/LICENSE
new file mode 100644
index 0000000000..f4967dbc5c
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/README.md b/vendor/github.com/redis/go-redis/extra/redisotel/v9/README.md
new file mode 100644
index 0000000000..997c17d1c5
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/README.md
@@ -0,0 +1,34 @@
+# OpenTelemetry instrumentation for go-redis
+
+## Installation
+
+```bash
+go get github.com/redis/go-redis/extra/redisotel/v9
+```
+
+## Usage
+
+Tracing is enabled by adding a hook:
+
+```go
+import (
+ "github.com/redis/go-redis/v9"
+ "github.com/redis/go-redis/extra/redisotel/v9"
+)
+
+rdb := redis.NewClient(&redis.Options{...})
+
+// Enable tracing instrumentation.
+if err := redisotel.InstrumentTracing(rdb); err != nil {
+ panic(err)
+}
+
+// Enable metrics instrumentation.
+if err := redisotel.InstrumentMetrics(rdb); err != nil {
+ panic(err)
+}
+```
+
+See [example](../../example/otel) and
+[Monitoring Go Redis Performance and Errors](https://redis.uptrace.dev/guide/go-redis-monitoring.html)
+for details.
diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go
new file mode 100644
index 0000000000..c24f896729
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go
@@ -0,0 +1,138 @@
+package redisotel
+
+import (
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type config struct {
+ // Common options.
+
+ dbSystem string
+ attrs []attribute.KeyValue
+
+ // Tracing options.
+
+ tp trace.TracerProvider
+ tracer trace.Tracer
+
+ dbStmtEnabled bool
+
+ // Metrics options.
+
+ mp metric.MeterProvider
+ meter metric.Meter
+
+ poolName string
+}
+
+type baseOption interface {
+ apply(conf *config)
+}
+
+type Option interface {
+ baseOption
+ tracing()
+ metrics()
+}
+
+type option func(conf *config)
+
+func (fn option) apply(conf *config) {
+ fn(conf)
+}
+
+func (fn option) tracing() {}
+
+func (fn option) metrics() {}
+
+func newConfig(opts ...baseOption) *config {
+ conf := &config{
+ dbSystem: "redis",
+ attrs: []attribute.KeyValue{},
+
+ tp: otel.GetTracerProvider(),
+ mp: otel.GetMeterProvider(),
+ dbStmtEnabled: true,
+ }
+
+ for _, opt := range opts {
+ opt.apply(conf)
+ }
+
+ conf.attrs = append(conf.attrs, semconv.DBSystemKey.String(conf.dbSystem))
+
+ return conf
+}
+
+func WithDBSystem(dbSystem string) Option {
+ return option(func(conf *config) {
+ conf.dbSystem = dbSystem
+ })
+}
+
+// WithAttributes specifies additional attributes to be added to the span.
+func WithAttributes(attrs ...attribute.KeyValue) Option {
+ return option(func(conf *config) {
+ conf.attrs = append(conf.attrs, attrs...)
+ })
+}
+
+//------------------------------------------------------------------------------
+
+type TracingOption interface {
+ baseOption
+ tracing()
+}
+
+type tracingOption func(conf *config)
+
+var _ TracingOption = (*tracingOption)(nil)
+
+func (fn tracingOption) apply(conf *config) {
+ fn(conf)
+}
+
+func (fn tracingOption) tracing() {}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) TracingOption {
+ return tracingOption(func(conf *config) {
+ conf.tp = provider
+ })
+}
+
+// WithDBStatement controls whether the tracing hook records the raw redis
+// command in the db.statement span attribute (enabled by default).
+func WithDBStatement(on bool) TracingOption {
+ return tracingOption(func(conf *config) {
+ conf.dbStmtEnabled = on
+ })
+}
+
+//------------------------------------------------------------------------------
+
+type MetricsOption interface {
+ baseOption
+ metrics()
+}
+
+type metricsOption func(conf *config)
+
+var _ MetricsOption = (*metricsOption)(nil)
+
+func (fn metricsOption) apply(conf *config) {
+ fn(conf)
+}
+
+func (fn metricsOption) metrics() {}
+
+// WithMeterProvider configures the metric.MeterProvider used to create the
+// Meter and its instruments.
+func WithMeterProvider(mp metric.MeterProvider) MetricsOption {
+ return metricsOption(func(conf *config) {
+ conf.mp = mp
+ })
+}
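
A hedged sketch of wiring these options together; the SDK tracer provider here is a bare placeholder (configure exporters as needed) and the address is illustrative:

```go
package main

import (
	"github.com/redis/go-redis/extra/redisotel/v9"
	"github.com/redis/go-redis/v9"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	tp := sdktrace.NewTracerProvider() // placeholder provider
	if err := redisotel.InstrumentTracing(rdb,
		redisotel.WithTracerProvider(tp),
		redisotel.WithDBStatement(false), // keep command text out of spans
	); err != nil {
		panic(err)
	}
}
```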
diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go
new file mode 100644
index 0000000000..695c7ee3ef
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go
@@ -0,0 +1,253 @@
+package redisotel
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/redis/go-redis/v9"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+)
+
+// InstrumentMetrics starts reporting OpenTelemetry Metrics.
+//
+// Based on https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/database-metrics.md
+func InstrumentMetrics(rdb redis.UniversalClient, opts ...MetricsOption) error {
+ baseOpts := make([]baseOption, len(opts))
+ for i, opt := range opts {
+ baseOpts[i] = opt
+ }
+ conf := newConfig(baseOpts...)
+
+ if conf.meter == nil {
+ conf.meter = conf.mp.Meter(
+ instrumName,
+ metric.WithInstrumentationVersion("semver:"+redis.Version()),
+ )
+ }
+
+ switch rdb := rdb.(type) {
+ case *redis.Client:
+ if conf.poolName == "" {
+ opt := rdb.Options()
+ conf.poolName = opt.Addr
+ }
+ conf.attrs = append(conf.attrs, attribute.String("pool.name", conf.poolName))
+
+ if err := reportPoolStats(rdb, conf); err != nil {
+ return err
+ }
+ if err := addMetricsHook(rdb, conf); err != nil {
+ return err
+ }
+ return nil
+ case *redis.ClusterClient:
+ rdb.OnNewNode(func(rdb *redis.Client) {
+ if conf.poolName == "" {
+ opt := rdb.Options()
+ conf.poolName = opt.Addr
+ }
+ conf.attrs = append(conf.attrs, attribute.String("pool.name", conf.poolName))
+
+ if err := reportPoolStats(rdb, conf); err != nil {
+ otel.Handle(err)
+ }
+ if err := addMetricsHook(rdb, conf); err != nil {
+ otel.Handle(err)
+ }
+ })
+ return nil
+ case *redis.Ring:
+ rdb.OnNewNode(func(rdb *redis.Client) {
+ if conf.poolName == "" {
+ opt := rdb.Options()
+ conf.poolName = opt.Addr
+ }
+ conf.attrs = append(conf.attrs, attribute.String("pool.name", conf.poolName))
+
+ if err := reportPoolStats(rdb, conf); err != nil {
+ otel.Handle(err)
+ }
+ if err := addMetricsHook(rdb, conf); err != nil {
+ otel.Handle(err)
+ }
+ })
+ return nil
+ default:
+ return fmt.Errorf("redisotel: %T not supported", rdb)
+ }
+}
+
+func reportPoolStats(rdb *redis.Client, conf *config) error {
+ labels := conf.attrs
+ idleAttrs := append(labels, attribute.String("state", "idle"))
+ usedAttrs := append(labels, attribute.String("state", "used"))
+
+ idleMax, err := conf.meter.Int64ObservableUpDownCounter(
+ "db.client.connections.idle.max",
+ metric.WithDescription("The maximum number of idle open connections allowed"),
+ )
+ if err != nil {
+ return err
+ }
+
+ idleMin, err := conf.meter.Int64ObservableUpDownCounter(
+ "db.client.connections.idle.min",
+ metric.WithDescription("The minimum number of idle open connections allowed"),
+ )
+ if err != nil {
+ return err
+ }
+
+ connsMax, err := conf.meter.Int64ObservableUpDownCounter(
+ "db.client.connections.max",
+ metric.WithDescription("The maximum number of open connections allowed"),
+ )
+ if err != nil {
+ return err
+ }
+
+ usage, err := conf.meter.Int64ObservableUpDownCounter(
+ "db.client.connections.usage",
+ metric.WithDescription("The number of connections that are currently in state described by the state attribute"),
+ )
+ if err != nil {
+ return err
+ }
+
+ timeouts, err := conf.meter.Int64ObservableUpDownCounter(
+ "db.client.connections.timeouts",
+ metric.WithDescription("The number of connection timeouts that have occurred trying to obtain a connection from the pool"),
+ )
+ if err != nil {
+ return err
+ }
+
+ redisConf := rdb.Options()
+ _, err = conf.meter.RegisterCallback(
+ func(ctx context.Context, o metric.Observer) error {
+ stats := rdb.PoolStats()
+
+ o.ObserveInt64(idleMax, int64(redisConf.MaxIdleConns), metric.WithAttributes(labels...))
+ o.ObserveInt64(idleMin, int64(redisConf.MinIdleConns), metric.WithAttributes(labels...))
+ o.ObserveInt64(connsMax, int64(redisConf.PoolSize), metric.WithAttributes(labels...))
+
+ o.ObserveInt64(usage, int64(stats.IdleConns), metric.WithAttributes(idleAttrs...))
+ o.ObserveInt64(usage, int64(stats.TotalConns-stats.IdleConns), metric.WithAttributes(usedAttrs...))
+
+ o.ObserveInt64(timeouts, int64(stats.Timeouts), metric.WithAttributes(labels...))
+ return nil
+ },
+ idleMax,
+ idleMin,
+ connsMax,
+ usage,
+ timeouts,
+ )
+
+ return err
+}
+
+func addMetricsHook(rdb *redis.Client, conf *config) error {
+ createTime, err := conf.meter.Float64Histogram(
+ "db.client.connections.create_time",
+ metric.WithDescription("The time it took to create a new connection."),
+ metric.WithUnit("ms"),
+ )
+ if err != nil {
+ return err
+ }
+
+ useTime, err := conf.meter.Float64Histogram(
+ "db.client.connections.use_time",
+ metric.WithDescription("The time between borrowing a connection and returning it to the pool."),
+ metric.WithUnit("ms"),
+ )
+ if err != nil {
+ return err
+ }
+
+ rdb.AddHook(&metricsHook{
+ createTime: createTime,
+ useTime: useTime,
+ attrs: conf.attrs,
+ })
+ return nil
+}
+
+type metricsHook struct {
+ createTime metric.Float64Histogram
+ useTime metric.Float64Histogram
+ attrs []attribute.KeyValue
+}
+
+var _ redis.Hook = (*metricsHook)(nil)
+
+func (mh *metricsHook) DialHook(hook redis.DialHook) redis.DialHook {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ start := time.Now()
+
+ conn, err := hook(ctx, network, addr)
+
+ attrs := make([]attribute.KeyValue, 0, len(mh.attrs)+1)
+ attrs = append(attrs, mh.attrs...)
+ attrs = append(attrs, statusAttr(err))
+
+ mh.createTime.Record(ctx, milliseconds(time.Since(start)), metric.WithAttributes(attrs...))
+ return conn, err
+ }
+}
+
+func (mh *metricsHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ start := time.Now()
+
+ err := hook(ctx, cmd)
+
+ dur := time.Since(start)
+
+ attrs := make([]attribute.KeyValue, 0, len(mh.attrs)+2)
+ attrs = append(attrs, mh.attrs...)
+ attrs = append(attrs, attribute.String("type", "command"))
+ attrs = append(attrs, statusAttr(err))
+
+ mh.useTime.Record(ctx, milliseconds(dur), metric.WithAttributes(attrs...))
+
+ return err
+ }
+}
+
+func (mh *metricsHook) ProcessPipelineHook(
+ hook redis.ProcessPipelineHook,
+) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ start := time.Now()
+
+ err := hook(ctx, cmds)
+
+ dur := time.Since(start)
+
+ attrs := make([]attribute.KeyValue, 0, len(mh.attrs)+2)
+ attrs = append(attrs, mh.attrs...)
+ attrs = append(attrs, attribute.String("type", "pipeline"))
+ attrs = append(attrs, statusAttr(err))
+
+ mh.useTime.Record(ctx, milliseconds(dur), metric.WithAttributes(attrs...))
+
+ return err
+ }
+}
+
+func milliseconds(d time.Duration) float64 {
+ return float64(d) / float64(time.Millisecond)
+}
+
+func statusAttr(err error) attribute.KeyValue {
+ if err != nil {
+ return attribute.String("status", "error")
+ }
+ return attribute.String("status", "ok")
+}
diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go
new file mode 100644
index 0000000000..0bbf692adf
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go
@@ -0,0 +1,215 @@
+package redisotel
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "runtime"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
+ "go.opentelemetry.io/otel/trace"
+
+ "github.com/redis/go-redis/extra/rediscmd/v9"
+ "github.com/redis/go-redis/v9"
+)
+
+const (
+ instrumName = "github.com/redis/go-redis/extra/redisotel"
+)
+
+func InstrumentTracing(rdb redis.UniversalClient, opts ...TracingOption) error {
+ switch rdb := rdb.(type) {
+ case *redis.Client:
+ opt := rdb.Options()
+ connString := formatDBConnString(opt.Network, opt.Addr)
+ rdb.AddHook(newTracingHook(connString, opts...))
+ return nil
+ case *redis.ClusterClient:
+ rdb.AddHook(newTracingHook("", opts...))
+
+ rdb.OnNewNode(func(rdb *redis.Client) {
+ opt := rdb.Options()
+ connString := formatDBConnString(opt.Network, opt.Addr)
+ rdb.AddHook(newTracingHook(connString, opts...))
+ })
+ return nil
+ case *redis.Ring:
+ rdb.AddHook(newTracingHook("", opts...))
+
+ rdb.OnNewNode(func(rdb *redis.Client) {
+ opt := rdb.Options()
+ connString := formatDBConnString(opt.Network, opt.Addr)
+ rdb.AddHook(newTracingHook(connString, opts...))
+ })
+ return nil
+ default:
+ return fmt.Errorf("redisotel: %T not supported", rdb)
+ }
+}
+
+type tracingHook struct {
+ conf *config
+
+ spanOpts []trace.SpanStartOption
+}
+
+var _ redis.Hook = (*tracingHook)(nil)
+
+func newTracingHook(connString string, opts ...TracingOption) *tracingHook {
+ baseOpts := make([]baseOption, len(opts))
+ for i, opt := range opts {
+ baseOpts[i] = opt
+ }
+ conf := newConfig(baseOpts...)
+
+ if conf.tracer == nil {
+ conf.tracer = conf.tp.Tracer(
+ instrumName,
+ trace.WithInstrumentationVersion("semver:"+redis.Version()),
+ )
+ }
+ if connString != "" {
+ conf.attrs = append(conf.attrs, semconv.DBConnectionStringKey.String(connString))
+ }
+
+ return &tracingHook{
+ conf: conf,
+
+ spanOpts: []trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(conf.attrs...),
+ },
+ }
+}
+
+func (th *tracingHook) DialHook(hook redis.DialHook) redis.DialHook {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ if !trace.SpanFromContext(ctx).IsRecording() {
+ return hook(ctx, network, addr)
+ }
+
+ ctx, span := th.conf.tracer.Start(ctx, "redis.dial", th.spanOpts...)
+ defer span.End()
+
+ conn, err := hook(ctx, network, addr)
+ if err != nil {
+ recordError(span, err)
+ return nil, err
+ }
+ return conn, nil
+ }
+}
+
+func (th *tracingHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook {
+ return func(ctx context.Context, cmd redis.Cmder) error {
+ if !trace.SpanFromContext(ctx).IsRecording() {
+ return hook(ctx, cmd)
+ }
+
+ fn, file, line := funcFileLine("github.com/redis/go-redis")
+
+ attrs := make([]attribute.KeyValue, 0, 8)
+ attrs = append(attrs,
+ semconv.CodeFunctionKey.String(fn),
+ semconv.CodeFilepathKey.String(file),
+ semconv.CodeLineNumberKey.Int(line),
+ )
+
+ if th.conf.dbStmtEnabled {
+ cmdString := rediscmd.CmdString(cmd)
+ attrs = append(attrs, semconv.DBStatementKey.String(cmdString))
+ }
+
+ opts := th.spanOpts
+ opts = append(opts, trace.WithAttributes(attrs...))
+
+ ctx, span := th.conf.tracer.Start(ctx, cmd.FullName(), opts...)
+ defer span.End()
+
+ if err := hook(ctx, cmd); err != nil {
+ recordError(span, err)
+ return err
+ }
+ return nil
+ }
+}
+
+func (th *tracingHook) ProcessPipelineHook(
+ hook redis.ProcessPipelineHook,
+) redis.ProcessPipelineHook {
+ return func(ctx context.Context, cmds []redis.Cmder) error {
+ if !trace.SpanFromContext(ctx).IsRecording() {
+ return hook(ctx, cmds)
+ }
+
+ fn, file, line := funcFileLine("github.com/redis/go-redis")
+
+ attrs := make([]attribute.KeyValue, 0, 8)
+ attrs = append(attrs,
+ semconv.CodeFunctionKey.String(fn),
+ semconv.CodeFilepathKey.String(file),
+ semconv.CodeLineNumberKey.Int(line),
+ attribute.Int("db.redis.num_cmd", len(cmds)),
+ )
+
+ summary, cmdsString := rediscmd.CmdsString(cmds)
+ if th.conf.dbStmtEnabled {
+ attrs = append(attrs, semconv.DBStatementKey.String(cmdsString))
+ }
+
+ opts := th.spanOpts
+ opts = append(opts, trace.WithAttributes(attrs...))
+
+ ctx, span := th.conf.tracer.Start(ctx, "redis.pipeline "+summary, opts...)
+ defer span.End()
+
+ if err := hook(ctx, cmds); err != nil {
+ recordError(span, err)
+ return err
+ }
+ return nil
+ }
+}
+
+func recordError(span trace.Span, err error) {
+ if err != redis.Nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ }
+}
+
+func formatDBConnString(network, addr string) string {
+ if network == "tcp" {
+ network = "redis"
+ }
+ return fmt.Sprintf("%s://%s", network, addr)
+}
+
+func funcFileLine(pkg string) (string, string, int) {
+ const depth = 16
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ ff := runtime.CallersFrames(pcs[:n])
+
+ var fn, file string
+ var line int
+ for {
+ f, ok := ff.Next()
+ if !ok {
+ break
+ }
+ fn, file, line = f.Function, f.File, f.Line
+ if !strings.Contains(fn, pkg) {
+ break
+ }
+ }
+
+ if ind := strings.LastIndexByte(fn, '/'); ind != -1 {
+ fn = fn[ind+1:]
+ }
+
+ return fn, file, line
+}
diff --git a/vendor/github.com/redis/go-redis/v9/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore
new file mode 100644
index 0000000000..6f868895ba
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/.gitignore
@@ -0,0 +1,6 @@
+*.rdb
+testdata/*
+.idea/
+.DS_Store
+*.tar.gz
+*.dic
\ No newline at end of file
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/.golangci.yml
rename to vendor/github.com/redis/go-redis/v9/.golangci.yml
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml b/vendor/github.com/redis/go-redis/v9/.prettierrc.yml
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/.prettierrc.yml
rename to vendor/github.com/redis/go-redis/v9/.prettierrc.yml
diff --git a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md
new file mode 100644
index 0000000000..297438a9fc
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md
@@ -0,0 +1,124 @@
+## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29)
+
+
+### Features
+
+* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602))
+* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe))
+* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af))
+
+
+
+## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01)
+
+
+### Bug Fixes
+
+* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241))
+
+
+### Features
+
+* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e))
+* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8))
+* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) ([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af))
+
+
+
+## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02)
+
+### New Features
+
+- feat(scan): scan time.Time sets the default decoding (#2413)
+- Add support for CLUSTER LINKS command (#2504)
+- Add support for acl dryrun command (#2502)
+- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500)
+- Add support for LCS Command (#2480)
+- Add support for BZMPOP (#2456)
+- Adding support for ZMPOP command (#2408)
+- Add support for LMPOP (#2440)
+- feat: remove pool unused fields (#2438)
+- Expiretime and PExpireTime (#2426)
+- Implement `FUNCTION` group of commands (#2475)
+- feat(zadd): add ZAddLT and ZAddGT (#2429)
+- Add: Support for COMMAND LIST command (#2491)
+- Add support for BLMPOP (#2442)
+- feat: check pipeline.Do to prevent confusion with Exec (#2517)
+- Function stats, function kill, fcall and fcall_ro (#2486)
+- feat: Add support for CLUSTER SHARDS command (#2507)
+- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498)
+
+### Fixed
+
+- fix: eval api cmd.SetFirstKeyPos (#2501)
+- fix: limit the number of connections created (#2441)
+- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479)
+- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458)
+- fix: group lag can be null (#2448)
+
+### Maintenance
+
+- Updating to the latest version of redis (#2508)
+- Allowing for running tests on a port other than the fixed 6380 (#2466)
+- redis 7.0.8 in tests (#2450)
+- docs: Update redisotel example for v9 (#2425)
+- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476)
+- chore: add Chinese translation (#2436)
+- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421)
+- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420)
+- chore(deps): bump actions/setup-go from 3 to 4 (#2495)
+- docs: add instructions for the HSet api (#2503)
+- docs: add reading lag field comment (#2451)
+- test: update go mod before testing(go mod tidy) (#2423)
+- docs: fix comment typo (#2505)
+- test: remove testify (#2463)
+- refactor: change ListElementCmd to KeyValuesCmd. (#2443)
+- fix(appendArg): appendArg case special type (#2489)
+
+## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01)
+
+### Features
+
+* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65))
+
+## v9 2023-01-30
+
+### Breaking
+
+- Changed Pipelines to not be thread-safe any more.
+
+### Added
+
+- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was
+ contributed by @monkey92t who has done the majority of work in this release.
+- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts
+ and deadlines. See
+ [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details.
+- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example,
+ `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`.
+- Added metrics instrumentation using `redisotel.InstrumentMetrics`. See the
+  [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Added `redis.HasErrorPrefix` to help working with errors.
+
+### Changed
+
+- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is
+ completely gone in v9.
+- Reworked hook interface and added `DialHook`.
+- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See
+ [example](example/otel) and
+ [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html).
+- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making
+ an allocation.
+- Renamed the option `MaxConnAge` to `ConnMaxLifetime`.
+- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`.
+- Removed connection reaper in favor of `MaxIdleConns`.
+- Removed `WithContext` since `context.Context` can be passed directly as an arg.
+- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and
+ it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to
+ reset commands for some reason.
+
+### Fixed
+
+- Improved and fixed pipeline retries.
+- As usually, added support for more commands and fixed some bugs.
diff --git a/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md
new file mode 100644
index 0000000000..90030b89f6
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md
@@ -0,0 +1,101 @@
+# Contributing
+
+## Introduction
+
+We appreciate your interest in considering contributing to go-redis.
+Community contributions mean a lot to us.
+
+## Contributions we need
+
+You may already know how you'd like to contribute, whether it's a fix for a bug you
+encountered, or a new feature your team wants to use.
+
+If you don't know where to start, consider improving documentation,
+triaging bugs, or writing tutorials; these are all examples of
+helpful contributions that mean less work for you.
+
+## Your First Contribution
+
+Unsure where to begin contributing? You can start by looking through
+[help-wanted
+issues](https://github.com/redis/go-redis/issues?q=is%3Aopen+is%3Aissue+label%3ahelp-wanted).
+
+Never contributed to open source before? Plenty of friendly tutorials
+can walk you through making a first contribution.
+
+## Getting Started
+
+Here's how to get started with your code contribution:
+
+1. Create your own fork of go-redis
+2. Do the changes in your fork
+3. If you need a development environment, run `make test`. Note: this clones and builds the latest release of [redis](https://redis.io). You also need a redis-stack-server docker, in order to run the capabilities tests. This can be started by running:
+ ```docker run -p 6379:6379 -it redis/redis-stack-server:edge```
+4. While developing, make sure the tests pass by running `make test`
+5. If you like the change and think the project could use it, send a
+ pull request
+
+To see what else is part of the automation, run `invoke -l`
+
+## Testing
+
+Call `make test` to run all tests, including linters.
+
+Continuous Integration uses these same wrappers to run all of these
+tests against multiple versions of Go. Feel free to test your
+changes against all the supported Go versions, as declared by the
+[build.yml](./.github/workflows/build.yml) file.
+
+### Troubleshooting
+
+If you get any errors when running `make test`, make sure
+that you are using supported versions of Docker and go.
+
+## How to Report a Bug
+
+### Security Vulnerabilities
+
+**NOTE**: If you find a security vulnerability, do NOT open an issue.
+Email [Redis Open Source](mailto:oss@redis.com) instead.
+
+In order to determine whether you are dealing with a security issue, ask
+yourself these two questions:
+
+- Can I access something that's not mine, or something I shouldn't
+ have access to?
+- Can I disable something for other people?
+
+If the answer to either of those two questions is *yes*, then you're
+probably dealing with a security issue. Note that even if you answer
+*no* to both questions, you may still be dealing with a security
+issue, so if you're unsure, just email [us](mailto:oss@redis.com).
+
+### Everything Else
+
+When filing an issue, make sure to answer these five questions:
+
+1. What version of go-redis are you using?
+2. What version of redis are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+## Suggest a feature or enhancement
+
+If you'd like to contribute a new feature, make sure you check our
+issue list to see if someone has already proposed it. Work may already
+be underway on the feature you want or we may have rejected a
+feature like it already.
+
+If you don't see anything, open a new issue that describes the feature
+you would like and how it should work.
+
+## Code review process
+
+The core team regularly looks at pull requests. We will provide
+feedback as soon as possible. After receiving our feedback, please respond
+within two weeks. After that time, we may close your PR if it isn't
+showing any activity.
diff --git a/vendor/github.com/redis/go-redis/v9/LICENSE b/vendor/github.com/redis/go-redis/v9/LICENSE
new file mode 100644
index 0000000000..f4967dbc5c
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/redis/go-redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile
new file mode 100644
index 0000000000..dc2fe780aa
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/Makefile
@@ -0,0 +1,44 @@
+GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go test in $${dir}"; \
+ (cd "$${dir}" && \
+ go mod tidy -compat=1.18 && \
+ go test && \
+ go test ./... -short -race && \
+ go test ./... -run=NONE -bench=. -benchmem && \
+ env GOOS=linux GOARCH=386 go test && \
+ go vet); \
+ done
+ cd internal/customvet && go build .
+ go vet -vettool ./internal/customvet/customvet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench fmt
+
+build:
+ go build .
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- https://download.redis.io/releases/redis-7.2.1.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
+
+fmt:
+ gofumpt -w ./
+ goimports -w -local github.com/redis/go-redis ./
+
+go_mod_tidy:
+ set -e; for dir in $(GO_MOD_DIRS); do \
+ echo "go mod tidy in $${dir}"; \
+ (cd "$${dir}" && \
+ go get -u ./... && \
+ go mod tidy -compat=1.18); \
+ done
diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md
new file mode 100644
index 0000000000..043d3f0e6d
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/README.md
@@ -0,0 +1,274 @@
+# Redis client for Go
+
+[![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+
+> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can
+> use it to monitor applications and set up automatic alerts to receive notifications via email,
+> Slack, Telegram, and others.
+>
+> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which
+> demonstrates how you can use Uptrace to monitor go-redis.
+
+## How do I Redis?
+
+[Learn for free at Redis University](https://university.redis.com/)
+
+[Build faster with the Redis Launchpad](https://launchpad.redis.com/)
+
+[Try the Redis Cloud](https://redis.com/try-free/)
+
+[Dive in developer tutorials](https://developer.redis.com/)
+
+[Join the Redis community](https://redis.com/community/)
+
+[Work at Redis](https://redis.com/company/careers/jobs/)
+
+## Documentation
+
+- [English](https://redis.uptrace.dev)
+- [简体中文](https://redis.uptrace.dev/zh/)
+
+## Resources
+
+- [Discussions](https://github.com/redis/go-redis/discussions)
+- [Chat](https://discord.gg/rWtp5Aj)
+- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9)
+- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples)
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed
+key-value NoSQL database that uses RocksDB as its storage engine and is compatible with the Redis protocol.
+
+## Features
+
+- Redis commands except QUIT and SYNC.
+- Automatic connection pooling.
+- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html).
+- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html).
+- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html).
+- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html).
+- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html).
+- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
+- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
+- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/)
+
+## Installation
+
+go-redis supports the two most recent Go versions and requires a Go version
+with [modules](https://github.com/golang/go/wiki/Modules) support. So make
+sure to initialize a Go module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+Then install go-redis/**v9**:
+
+```shell
+go get github.com/redis/go-redis/v9
+```
+
+## Quickstart
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+The above can be modified to specify the version of the RESP protocol by adding the `protocol`
+option to the `Options` struct:
+
+```go
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3
+ })
+
+```
+
+### Connecting via a redis url
+
+go-redis also supports connecting via the
+[redis uri specification](https://github.com/redis/redis-specifications/tree/master/uri/redis.txt).
+The example below demonstrates how the connection can easily be configured using a string, adhering
+to this specification.
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9"
+)
+
+func ExampleClient() *redis.Client {
+ url := "redis://user:password@localhost:6379/0?protocol=3"
+ opts, err := redis.ParseURL(url)
+ if err != nil {
+ panic(err)
+ }
+
+ return redis.NewClient(opts)
+}
+
+```
+
+
+### Advanced Configuration
+
+go-redis supports extending the client identification phase to allow projects to send their own custom client identification.
+
+#### Default Client Identification
+
+By default, go-redis automatically sends the client library name and version during the connection process. This feature is available in redis-server as of version 7.2. As a result, the command is "fire and forget", meaning it fails silently if the redis server does not support this feature.
+
+#### Disabling Identity Verification
+
+When connection identity verification is not required or needs to be explicitly disabled, a `DisableIndentity` configuration option exists. In V10 of this library, `DisableIndentity` will become `DisableIdentity` in order to fix the associated typo.
+
+To disable verification, set the `DisableIndentity` option to `true` in the Redis client options:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "",
+ DB: 0,
+ DisableIndentity: true, // Disable set-info on connect
+})
+```
+
+## Contributing
+
+Please see [our contributing guidelines](CONTRIBUTING.md) to help us improve this library!
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+	Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## Run the test
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the redis config file are defined in `main_test.go`:
+
+```go
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
+
+```shell
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```shell
+go test
+```
+
+Another option is to run your specific tests against an already running redis.
+The example below tests against a redis running on port 9999:
+
+```shell
+REDIS_PORT=9999 go test
+```
+
+## See also
+
+- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite
+- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
+- [Golang HTTP router](https://bunrouter.uptrace.dev/)
+- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/redis/go-redis/v9/RELEASING.md
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/RELEASING.md
rename to vendor/github.com/redis/go-redis/v9/RELEASING.md
diff --git a/vendor/github.com/redis/go-redis/v9/acl_commands.go b/vendor/github.com/redis/go-redis/v9/acl_commands.go
new file mode 100644
index 0000000000..06847be2ed
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/acl_commands.go
@@ -0,0 +1,35 @@
+package redis
+
+import "context"
+
+type ACLCmdable interface {
+ ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd
+ ACLLog(ctx context.Context, count int64) *ACLLogCmd
+ ACLLogReset(ctx context.Context) *StatusCmd
+}
+
+func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd {
+ args := make([]interface{}, 0, 3+len(command))
+ args = append(args, "acl", "dryrun", username)
+ args = append(args, command...)
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLLog(ctx context.Context, count int64) *ACLLogCmd {
+ args := make([]interface{}, 0, 3)
+ args = append(args, "acl", "log")
+ if count > 0 {
+ args = append(args, count)
+ }
+ cmd := NewACLLogCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "acl", "log", "reset")
+ _ = c(ctx, cmd)
+ return cmd
+}
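
A usage sketch of the ACL helpers above, assuming a local server and that ACLLogCmd.Result yields the parsed log entries:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Check whether the "default" user may run GET key, without executing it.
	res, err := rdb.ACLDryRun(ctx, "default", "get", "key").Result()
	fmt.Println(res, err)

	// Fetch up to 10 recent ACL security events, then clear the log.
	entries, _ := rdb.ACLLog(ctx, 10).Result()
	fmt.Println(len(entries))
	_ = rdb.ACLLogReset(ctx).Err()
}
```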
diff --git a/vendor/github.com/redis/go-redis/v9/bitmap_commands.go b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go
new file mode 100644
index 0000000000..d9fc50dce5
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/bitmap_commands.go
@@ -0,0 +1,163 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type BitMapCmdable interface {
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd
+ BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd
+}
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+ Unit string // BYTE(default) | BIT
+}
+
+const BitCountIndexByte string = "BYTE"
+const BitCountIndexBit string = "BIT"
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ if bitCount.Unit == "" {
+ bitCount.Unit = "BYTE"
+ }
+ if bitCount.Unit != BitCountIndexByte && bitCount.Unit != BitCountIndexBit {
+ cmd := NewIntCmd(ctx)
+ cmd.SetErr(errors.New("redis: invalid bitcount index"))
+ return cmd
+ }
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ string(bitCount.Unit),
+ )
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+// BitPos is an API before Redis version 7.0, cmd: bitpos key bit start end
+// if you need the `byte | bit` parameter, please use `BitPosSpan`.
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
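+
+// Illustrative calls (key name assumed): find the first set bit, optionally
+// limiting the search to a byte range:
+//
+//	pos, err := rdb.BitPos(ctx, "mykey", 1).Result()       // whole string
+//	pos, err = rdb.BitPos(ctx, "mykey", 1, 2).Result()     // from byte 2
+//	pos, err = rdb.BitPos(ctx, "mykey", 1, 2, -1).Result() // bytes 2..end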
+
+// BitPosSpan supports the `byte | bit` range parameter introduced in Redis 7.0.
+// By default the bitpos command interprets the `start-end` range in bytes,
+// i.e. it counts from byte start to byte end. Set span to choose how the
+// range is interpreted:
+// span = "bit", cmd: bitpos key bit start end bit
+// span = "byte", cmd: bitpos key bit start end byte
+func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd {
+ cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BitField accepts multiple values:
+// - BitField("cmd1", "type1", "offset1", "value1", "cmd2", "type2", "offset2", "value2")
+// - BitField([]string{"cmd1", "type1", "offset1", "value1", "cmd2", "type2", "offset2", "value2"})
+// - BitField([]interface{}{"cmd1", "type1", "offset1", "value1", "cmd2", "type2", "offset2", "value2"})
+func (c cmdable) BitField(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "bitfield"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
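+
+// A sketch of a BITFIELD call (key and encodings are illustrative): set an
+// unsigned 8-bit field at offset 0 and read it back in one round trip:
+//
+//	vals, err := rdb.BitField(ctx, "bf", "SET", "u8", 0, 255, "GET", "u8", 0).Result()
+//	// vals[0] is the previous value seen by SET, vals[1] is the GET result.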
+
+// BitFieldRO - Read-only variant of the BITFIELD command.
+// It is like the original BITFIELD but only accepts the GET subcommand,
+// so it can safely be used on read-only replicas.
+// Values are consumed as encoding/offset pairs:
+// - BitFieldRO(ctx, key, "u8", 0, "u8", 8)
+func (c cmdable) BitFieldRO(ctx context.Context, key string, values ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "BITFIELD_RO"
+ args[1] = key
+ if len(values)%2 != 0 {
+ panic("BitFieldRO: invalid number of arguments, must be even")
+ }
+ for i := 0; i < len(values); i += 2 {
+ args = append(args, "GET", values[i], values[i+1])
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
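+
+// Sketch (key and encodings assumed): each encoding/offset pair becomes a GET
+// subcommand, so the call below issues "BITFIELD_RO bf GET u8 0 GET u8 8":
+//
+//	vals, err := rdb.BitFieldRO(ctx, "bf", "u8", 0, "u8", 8).Result()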
diff --git a/vendor/github.com/redis/go-redis/v9/cluster_commands.go b/vendor/github.com/redis/go-redis/v9/cluster_commands.go
new file mode 100644
index 0000000000..0caf0977a7
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/cluster_commands.go
@@ -0,0 +1,192 @@
+package redis
+
+import "context"
+
+type ClusterCmdable interface {
+ ClusterMyShardID(ctx context.Context) *StringCmd
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterShards(ctx context.Context) *ClusterShardsCmd
+ ClusterLinks(ctx context.Context) *ClusterLinksCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+}
+
+func (c cmdable) ClusterMyShardID(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "myshardid")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterShards(ctx context.Context) *ClusterShardsCmd {
+ cmd := NewClusterShardsCmd(ctx, "cluster", "shards")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterLinks(ctx context.Context) *ClusterLinksCmd {
+ cmd := NewClusterLinksCmd(ctx, "cluster", "links")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
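+
+// The range helpers simply expand [min, max] into individual slot numbers, so
+// the call below (slot values illustrative) sends "CLUSTER ADDSLOTS 0 1 ... 5460":
+//
+//	err := rdb.ClusterAddSlotsRange(ctx, 0, 5460).Err()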
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go
new file mode 100644
index 0000000000..9fb9a8310b
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/command.go
@@ -0,0 +1,5483 @@
+package redis
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/util"
+)
+
+type Cmder interface {
+ // Name returns the command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster".
+ Name() string
+
+ // FullName returns the full command name.
+ // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster info".
+ FullName() string
+
+ // Args returns all arguments of the command.
+ // e.g. "set k v ex 10" -> "[set k v ex 10]".
+ Args() []interface{}
+
+ // String returns the command formatted together with its response.
+ // e.g. "set k v ex 10" -> "set k v ex 10: OK", "get k" -> "get k: v".
+ String() string
+
+ stringArg(int) string
+ firstKeyPos() int8
+ SetFirstKeyPos(int8)
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
+ }
+
+ switch cmd.Name() {
+ case "eval", "evalsha", "eval_ro", "evalsha_ro":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+ return 1
+}
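+
+// For example (script and key illustrative), the args of
+//
+//	rdb.Eval(ctx, "return KEYS[1]", []string{"k1"})
+//
+// are ["eval", script, "1", "k1"], so stringArg(2) is the numkeys argument
+// and the first key, when present, sits at position 3.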
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
+ }
+
+ return util.BytesToString(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ arg := cmd.args[pos]
+ switch v := arg.(type) {
+ case string:
+ return v
+ default:
+ // TODO: consider using appendArg
+ return fmt.Sprint(v)
+ }
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+ cmd.val = val
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+ switch val := val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+ switch val := val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+ switch val := val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+ switch val := val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+ switch val := val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+ switch val := val.(type) {
+ case bool:
+ return val, nil
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case []interface{}:
+ return val, nil
+ default:
+ return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+ }
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := make([]string, len(slice))
+ for i, iface := range slice {
+ val, err := toString(iface)
+ if err != nil {
+ return nil, err
+ }
+ ss[i] = val
+ }
+ return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]int64, len(slice))
+ for i, iface := range slice {
+ val, err := toInt64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]uint64, len(slice))
+ for i, iface := range slice {
+ val, err := toUint64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float32, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat32(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float64, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat64(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ bools := make([]bool, len(slice))
+ for i, iface := range slice {
+ val, err := toBool(iface)
+ if err != nil {
+ return nil, err
+ }
+ bools[i] = val
+ }
+ return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the reply slice into a destination struct. The
+// keys are matched with the struct fields by the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ // Pass the list of keys and values.
+ // Skip the first two args for: HMGET key
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+ // Otherwise, it's: MGET field field ...
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
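+
+// A usage sketch with HMGET (the struct and key are assumptions):
+//
+//	type User struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//	var u User
+//	err := rdb.HMGet(ctx, "user:1", "name", "age").Scan(&u)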
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadSlice()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StatusCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntCmd) SetVal(val int64) {
+ cmd.val = val
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadInt()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+ cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+ cmd.val = val
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
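+
+// TTL is a typical producer of this command type; the sentinel values pass
+// through unscaled (key name illustrative):
+//
+//	d, err := rdb.TTL(ctx, "mykey").Result()
+//	switch d {
+//	case -2: // key does not exist
+//	case -1: // key exists but has no expiry
+//	default: // remaining time to live
+//	}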
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TimeCmd) SetVal(val time.Time) {
+ cmd.val = val
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ second, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ microsecond, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val = time.Unix(second, microsecond*1000)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolCmd) SetVal(val bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadBool()
+
+ // `SET key value NX` returns nil when key already exists. But
+ // `SETNX key value` returns bool (0/1). So convert nil to bool.
+ if err == Nil {
+ cmd.val = false
+ err = nil
+ }
+ return err
+}
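+
+// This is why SetNX can always be consumed as a plain bool (key, value, and
+// expiry are illustrative):
+//
+//	ok, err := rdb.SetNX(ctx, "lock", "owner-1", time.Minute).Result()
+//	// ok is false both for a 0 reply and for a nil reply.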
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringCmd) SetVal(val string) {
+ cmd.val = val
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatCmd) SetVal(val float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloat()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+ baseCmd
+
+ val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+ return &FloatSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+ return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]float64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch num, err := rd.ReadFloat(); {
+ case err == Nil:
+ cmd.val[i] = 0
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = num
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringSliceCmd) SetVal(val []string) {
+ cmd.val = val
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValue struct {
+ Key string
+ Value string
+}
+
+type KeyValueSliceCmd struct {
+ baseCmd
+
+ val []KeyValue
+}
+
+var _ Cmder = (*KeyValueSliceCmd)(nil)
+
+func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd {
+ return &KeyValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) {
+ cmd.val = val
+}
+
+func (cmd *KeyValueSliceCmd) Val() []KeyValue {
+ return cmd.val
+}
+
+func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Many commands respond in one of two formats. Either each key is paired with
+// its score in a nested array:
+//
+//	1) 1) "one"
+//	   2) (double) 1
+//	2) 1) "two"
+//	   2) (double) 2
+//
+// or keys and scores alternate in a flat array:
+//
+//	1) "two"
+//	2) (double) 2
+//	3) "one"
+//	4) (double) 1
+func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If n == 0, there is nothing further to read.
+ if n == 0 {
+ cmd.val = make([]KeyValue, 0)
+ return nil
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]KeyValue, n)
+ } else {
+ cmd.val = make([]KeyValue, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Value, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+ cmd.val = val
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if cmd.val[i], err = rd.ReadBool(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringStringCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*MapStringStringCmd)(nil)
+
+func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd {
+ return &MapStringStringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringCmd) SetVal(val map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched with the struct fields by the `redis:"field"` tag.
+func (cmd *MapStringStringCmd) Scan(dest interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ strct, err := hscan.Struct(dest)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
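+
+// A sketch with HGETALL (the struct and key are assumptions):
+//
+//	type User struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//	var u User
+//	err := rdb.HGetAll(ctx, "user:1").Scan(&u)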
+
+func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]string, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringIntCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*MapStringIntCmd)(nil)
+
+func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd {
+ return &MapStringIntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringIntCmd) SetVal(val map[string]int64) {
+ cmd.val = val
+}
+
+func (cmd *MapStringIntCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *MapStringIntCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringIntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = nn
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type MapStringSliceInterfaceCmd struct {
+ baseCmd
+ val map[string][]interface{}
+}
+
+func NewMapStringSliceInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringSliceInterfaceCmd {
+ return &MapStringSliceInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringSliceInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringSliceInterfaceCmd) SetVal(val map[string][]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Result() (map[string][]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make(map[string][]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[k] = make([]interface{}, nn)
+ for j := 0; j < nn; j++ {
+ value, err := rd.ReadReply()
+ if err != nil {
+ return err
+ }
+ cmd.val[k][j] = value
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+ cmd.val = val
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]struct{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+ cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = readXMessageSlice(rd)
+ return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs := make([]XMessage, n)
+ for i := 0; i < len(msgs); i++ {
+ if msgs[i], err = readXMessage(rd); err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return XMessage{}, err
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ v, err := stringInterfaceMapParser(rd)
+ if err != nil && err != proto.Nil {
+ return XMessage{}, err
+ }
+
+ return XMessage{
+ ID: id,
+ Values: v,
+ }, nil
+}
+
+func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ m := make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+ cmd.val = val
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+
+ var n int
+ if typ == proto.RespMap {
+ n, err = rd.ReadMapLen()
+ } else {
+ n, err = rd.ReadArrayLen()
+ }
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ if typ != proto.RespMap {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+ if cmd.val[i].Stream, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+ cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ var err error
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+ cmd.val = &XPending{}
+
+ if cmd.val.Count, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val.Consumers = make(map[string]int64, n)
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+ cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XPendingExt, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return err
+ }
+
+ if cmd.val[i].ID, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil {
+ return err
+ }
+
+ idle, err := rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+
+ if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+ baseCmd
+
+ start string
+ val []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+ return &XAutoClaimCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ cmd.val, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
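+
+// On the caller side this corresponds to (argument values illustrative):
+//
+//	msgs, start, err := rdb.XAutoClaim(ctx, &redis.XAutoClaimArgs{
+//		Stream: "stream", Group: "grp", Consumer: "c1",
+//		MinIdle: time.Minute, Start: "0-0",
+//	}).Result()
+//
+// where start is the cursor to pass as Start on the next call.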
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+ baseCmd
+
+ start string
+ val []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+ return &XAutoClaimJustIDCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch n {
+ case 2, // Redis 6
+ 3: // Redis 7
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n)
+ }
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]string, nn)
+ for i := 0; i < nn; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if n >= 3 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+ baseCmd
+ val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+ Name string
+ Pending int64
+ Idle time.Duration
+ Inactive time.Duration
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+ return &XInfoConsumersCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "consumers", stream, group},
+ },
+ }
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+ cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+ return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoConsumer, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for f := 0; f < nn; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ cmd.val[i].Name, err = rd.ReadString()
+ case "pending":
+ cmd.val[i].Pending, err = rd.ReadInt()
+ case "idle":
+ var idle int64
+ idle, err = rd.ReadInt()
+ cmd.val[i].Idle = time.Duration(idle) * time.Millisecond
+ case "inactive":
+ var inactive int64
+ inactive, err = rd.ReadInt()
+ cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond
+ default:
+ return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+ cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ group := &cmd.val[i]
+
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ for j := 0; j < nn; j++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "consumers":
+ group.Consumers, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "pending":
+ group.Pending, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return err
+ }
+ case "lag":
+ group.Lag, err = rd.ReadInt()
+
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ if err != nil && err != Nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ FirstEntry XMessage
+ LastEntry XMessage
+ RecordedFirstEntryID string
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = &XInfoStream{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "first-entry":
+ cmd.val.FirstEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "last-entry":
+ cmd.val.LastEntry, err = readXMessage(rd)
+ if err != nil && err != Nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key)
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+ baseCmd
+ val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ LastGeneratedID string
+ MaxDeletedEntryID string
+ EntriesAdded int64
+ Entries []XMessage
+ Groups []XInfoStreamGroup
+ RecordedFirstEntryID string
+}
+
+type XInfoStreamGroup struct {
+ Name string
+ LastDeliveredID string
+ EntriesRead int64
+ Lag int64
+ PelCount int64
+ Pending []XInfoStreamGroupPending
+ Consumers []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+ ID string
+ Consumer string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+ Name string
+ SeenTime time.Time
+ ActiveTime time.Time
+ PelCount int64
+ Pending []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+ ID string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+ return &XInfoStreamFullCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = &XInfoStreamFull{}
+
+ for i := 0; i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "entries-added":
+ cmd.val.EntriesAdded, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ case "entries":
+ cmd.val.Entries, err = readXMessageSlice(rd)
+ if err != nil {
+ return err
+ }
+ case "groups":
+ cmd.val.Groups, err = readStreamGroups(rd)
+ if err != nil {
+ return err
+ }
+ case "max-deleted-entry-id":
+ cmd.val.MaxDeletedEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ case "recorded-first-entry-id":
+ cmd.val.RecordedFirstEntryID, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+ return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]XInfoStreamGroup, 0, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ group := XInfoStreamGroup{}
+
+ for j := 0; j < nn; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ case "entries-read":
+ group.EntriesRead, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "lag":
+ // lag: the number of entries in the stream that are still waiting to be delivered
+ // to the group's consumers, or a NULL(Nil) when that number can't be determined.
+ group.Lag, err = rd.ReadInt()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+ case "pel-count":
+ group.PelCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ case "pending":
+ group.Pending, err = readXInfoStreamGroupPending(rd)
+ if err != nil {
+ return nil, err
+ }
+ case "consumers":
+ group.Consumers, err = readXInfoStreamConsumers(rd)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key)
+ }
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ pending := make([]XInfoStreamGroupPending, 0, n)
+
+ for i := 0; i < n; i++ {
+ if err = rd.ReadFixedArrayLen(4); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamGroupPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ p.Consumer, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
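+ // delivery is a Unix timestamp in milliseconds; split it into whole
+ // seconds and the millisecond remainder expressed as nanoseconds.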
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ pending = append(pending, p)
+ }
+
+ return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ consumers := make([]XInfoStreamConsumer, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c := XInfoStreamConsumer{}
+
+ for f := 0; f < nn; f++ {
+ cKey, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch cKey {
+ case "name":
+ c.Name, err = rd.ReadString()
+ case "seen-time":
+ seen, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.SeenTime = time.UnixMilli(seen)
+ case "active-time":
+ active, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ c.ActiveTime = time.UnixMilli(active)
+ case "pel-count":
+ c.PelCount, err = rd.ReadInt()
+ case "pending":
+ pendingNumber, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+ for pn := 0; pn < pendingNumber; pn++ {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return nil, err
+ }
+
+ p := XInfoStreamConsumerPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = append(c.Pending, p)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM FULL reply", cKey)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ consumers = append(consumers, c)
+ }
+
+ return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+ cmd.val = val
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ // If n is 0, there is nothing more to read.
+ if n == 0 {
+ cmd.val = make([]Z, 0)
+ return nil
+ }
+
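+ // RESP3 replies with an array of [member, score] pairs, while RESP2
+ // replies with a flat list of alternating members and scores, hence
+ // n elements versus n/2 below.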
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+ cmd.val = val
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(3); err != nil {
+ return err
+ }
+ cmd.val = &ZWithKey{}
+
+ if cmd.val.Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.val.Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+ cmd.page = page
+ cmd.cursor = cursor
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cursor, err := rd.ReadUint()
+ if err != nil {
+ return err
+ }
+ cmd.cursor = cursor
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.page = make([]string, n)
+
+ for i := 0; i < len(cmd.page); i++ {
+ if cmd.page[i], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
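+
+// A minimal usage sketch of the iterator (not part of the original source;
+// rdb is an assumed *Client):
+//
+//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//	for iter.Next(ctx) {
+//		_ = iter.Val()
+//	}
+//	_ = iter.Err()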
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+ NetworkingMetadata map[string]string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+ cmd.val = val
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterSlot, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ n, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n < 2 {
+ return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ }
+
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ // The first two elements are the start and end slots; the remaining n-2 are nodes.
+ nodes := make([]ClusterNode, n-2)
+
+ for j := 0; j < len(nodes); j++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 2 || nn > 4 {
+ return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", n)
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if nn >= 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ nodes[j].ID = id
+ }
+
+ if nn >= 4 {
+ metadataLength, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ networkingMetadata := make(map[string]string, metadataLength)
+
+ for i := 0; i < metadataLength; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ networkingMetadata[key] = value
+ }
+
+ nodes[j].NetworkingMetadata = networkingMetadata
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+
+ // WithCoord+WithDist+WithGeoHash
+ withLen int
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
+ }
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ q.withLen++
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ q.withLen++
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ q.withLen++
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
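+
+// For illustration (a hedged sketch, not part of the original source):
+// GeoRadiusQuery{Radius: 200, WithCoord: true} makes geoLocationArgs append
+// "200 km withcoord" and leaves withLen at 1, so readReply expects each
+// location as a 2-element array: the member name plus a coordinate pair.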
+
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+ cmd.locations = locations
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.locations = make([]GeoLocation, n)
+
+ for i := 0; i < len(cmd.locations); i++ {
+ // Without any WITH* option the reply is just the member name.
+ if cmd.q.withLen == 0 {
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Each entry is an array of the name plus withLen extra fields.
+ if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil {
+ return err
+ }
+
+ if cmd.locations[i].Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+ if cmd.q.WithDist {
+ if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithGeoHash {
+ if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ if cmd.q.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
+type GeoSearchQuery struct {
+ Member string
+
+ // Longitude and Latitude when using the FromLonLat option.
+ Longitude float64
+ Latitude float64
+
+ // Distance and unit when using the ByRadius option.
+ // Can be m, km, ft, or mi. Default is km.
+ Radius float64
+ RadiusUnit string
+
+ // Height, width and unit when using ByBox option.
+ // Can be m, km, ft, or mi. Default is km.
+ BoxWidth float64
+ BoxHeight float64
+ BoxUnit string
+
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Count int
+ CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+ GeoSearchQuery
+
+ WithCoord bool
+ WithDist bool
+ WithHash bool
+}
+
+type GeoSearchStoreQuery struct {
+ GeoSearchQuery
+
+ // When using the StoreDist option, the command stores the items in a
+ // sorted set populated with their distance from the center of the circle or box,
+ // as a floating-point number, in the same unit specified for that shape.
+ StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithHash {
+ args = append(args, "withhash")
+ }
+
+ return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+ if q.Member != "" {
+ args = append(args, "frommember", q.Member)
+ } else {
+ args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+ }
+
+ if q.Radius > 0 {
+ if q.RadiusUnit == "" {
+ q.RadiusUnit = "km"
+ }
+ args = append(args, "byradius", q.Radius, q.RadiusUnit)
+ } else {
+ if q.BoxUnit == "" {
+ q.BoxUnit = "km"
+ }
+ args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+ }
+
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ if q.CountAny {
+ args = append(args, "any")
+ }
+ }
+
+ return args
+}
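+
+// For illustration (a hedged sketch, not part of the original source):
+// GeoSearchQuery{Member: "m", Radius: 100} yields "frommember m byradius
+// 100 km", while an empty Member with a zero Radius falls back to
+// "fromlonlat <lon> <lat> bybox <w> <h> km".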
+
+type GeoSearchLocationCmd struct {
+ baseCmd
+
+ opt *GeoSearchLocationQuery
+ val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+ ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+ return &GeoSearchLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ opt: opt,
+ }
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+ cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+ return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]GeoLocation, n)
+ for i := 0; i < n; i++ {
+ _, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ var loc GeoLocation
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ if cmd.opt.WithDist {
+ loc.Dist, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithHash {
+ loc.GeoHash, err = rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithCoord {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ loc.Longitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ loc.Latitude, err = rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val[i] = loc
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+ cmd.val = val
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]*GeoPos, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ err = rd.ReadFixedArrayLen(2)
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return err
+ }
+
+ longitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+ latitude, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+ cmd.val = val
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+ const numArgRedis7 = 10
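+ // Redis 5 replies with 6 elements per command, Redis 6 adds ACL
+ // categories (7), and Redis 7 appends tips, key specifications, and
+ // sub-commands (10).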
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make(map[string]*CommandInfo, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ switch nn {
+ case numArgRedis5, numArgRedis6, numArgRedis7:
+ // ok
+ default:
+ return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn)
+ }
+
+ cmdInfo := &CommandInfo{}
+ if cmdInfo.Name, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ arity, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Arity = int8(arity)
+
+ flagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.Flags = make([]string, flagLen)
+ for f := 0; f < len(cmdInfo.Flags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.Flags[f] = ""
+ case err != nil:
+ return err
+ default:
+ if !cmdInfo.ReadOnly && s == "readonly" {
+ cmdInfo.ReadOnly = true
+ }
+ cmdInfo.Flags[f] = s
+ }
+ }
+
+ firstKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmdInfo.StepCount = int8(stepCount)
+
+ if nn >= numArgRedis6 {
+ aclFlagLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmdInfo.ACLFlags = make([]string, aclFlagLen)
+ for f := 0; f < len(cmdInfo.ACLFlags); f++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmdInfo.ACLFlags[f] = ""
+ case err != nil:
+ return err
+ default:
+ cmdInfo.ACLFlags[f] = s
+ }
+ }
+ }
+
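+ // Redis 7 appends tips, key specifications, and sub-commands; they are
+ // not exposed in CommandInfo, so discard them.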
+ if nn >= numArgRedis7 {
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ if err := rd.DiscardNext(); err != nil {
+ return err
+ }
+ }
+
+ cmd.val[cmdInfo.Name] = cmdInfo
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
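+// Get caches the result of the first successful fn call via internal.Once,
+// so later lookups reuse the map instead of re-issuing COMMAND.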
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // These optional fields are emitted only by Redis 4.0 or newer:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+ cmd.val = val
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]SlowLog, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn < 4 {
+ return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn)
+ }
+
+ if cmd.val[i].ID, err = rd.ReadInt(); err != nil {
+ return err
+ }
+
+ createdAt, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Time = time.Unix(createdAt, 0)
+
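+ // SLOWLOG reports the execution time in microseconds.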
+ costs, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Duration = time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if cmdLen < 1 {
+ return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+ }
+
+ cmd.val[i].Args = make([]string, cmdLen)
+ for f := 0; f < len(cmd.val[i].Args); f++ {
+ cmd.val[i].Args[f], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if nn >= 5 {
+ if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+
+ if nn >= 6 {
+ if cmd.val[i].ClientName, err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceCmd struct {
+ baseCmd
+
+ val map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceCmd)(nil)
+
+func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd {
+ return &MapStringInterfaceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make(map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
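+ // A Nil reply or a per-key command error is stored as the value for
+ // the key rather than failing the whole map.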
+ v, err := rd.ReadReply()
+ if err != nil {
+ if err == Nil {
+ cmd.val[k] = Nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ cmd.val[k] = err
+ continue
+ }
+ return err
+ }
+ cmd.val[k] = v
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringStringSliceCmd struct {
+ baseCmd
+
+ val []map[string]string
+}
+
+var _ Cmder = (*MapStringStringSliceCmd)(nil)
+
+func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd {
+ return &MapStringStringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *MapStringStringSliceCmd) Val() []map[string]string {
+ return cmd.val
+}
+
+func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringStringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]string, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]string, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ v, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//-----------------------------------------------------------------------
+
+type MapStringInterfaceSliceCmd struct {
+ baseCmd
+
+ val []map[string]interface{}
+}
+
+var _ Cmder = (*MapStringInterfaceSliceCmd)(nil)
+
+func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd {
+ return &MapStringInterfaceSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) {
+ cmd.val = val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} {
+ return cmd.val
+}
+
+func (cmd *MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *MapStringInterfaceSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]map[string]interface{}, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i] = make(map[string]interface{}, nn)
+ for f := 0; f < nn; f++ {
+ k, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ v, err := rd.ReadReply()
+ if err != nil && err != Nil {
+ return err
+ }
+ cmd.val[i][k] = v
+ }
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type KeyValuesCmd struct {
+ baseCmd
+
+ key string
+ val []string
+}
+
+var _ Cmder = (*KeyValuesCmd)(nil)
+
+func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd {
+ return &KeyValuesCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyValuesCmd) SetVal(key string, val []string) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *KeyValuesCmd) Val() (string, []string) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *KeyValuesCmd) Result() (string, []string, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *KeyValuesCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]string, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceWithKeyCmd struct {
+ baseCmd
+
+ key string
+ val []Z
+}
+
+var _ Cmder = (*ZSliceWithKeyCmd)(nil)
+
+func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd {
+ return &ZSliceWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) {
+ cmd.key = key
+ cmd.val = val
+}
+
+func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) {
+ return cmd.key, cmd.val
+}
+
+func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) {
+ return cmd.key, cmd.val, cmd.err
+}
+
+func (cmd *ZSliceWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ cmd.key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ typ, err := rd.PeekReplyType()
+ if err != nil {
+ return err
+ }
+ array := typ == proto.RespArray
+
+ if array {
+ cmd.val = make([]Z, n)
+ } else {
+ cmd.val = make([]Z, n/2)
+ }
+
+ for i := 0; i < len(cmd.val); i++ {
+ if array {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+ }
+
+ if cmd.val[i].Member, err = rd.ReadString(); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Score, err = rd.ReadFloat(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type Function struct {
+ Name string
+ Description string
+ Flags []string
+}
+
+type Library struct {
+ Name string
+ Engine string
+ Functions []Function
+ Code string
+}
+
+type FunctionListCmd struct {
+ baseCmd
+
+ val []Library
+}
+
+var _ Cmder = (*FunctionListCmd)(nil)
+
+func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd {
+ return &FunctionListCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionListCmd) SetVal(val []Library) {
+ cmd.val = val
+}
+
+func (cmd *FunctionListCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionListCmd) Val() []Library {
+ return cmd.val
+}
+
+func (cmd *FunctionListCmd) Result() ([]Library, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionListCmd) First() (*Library, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ if len(cmd.val) > 0 {
+ return &cmd.val[0], nil
+ }
+ return nil, Nil
+}
+
+func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ libraries := make([]Library, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ library := Library{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "library_name":
+ library.Name, err = rd.ReadString()
+ case "engine":
+ library.Engine, err = rd.ReadString()
+ case "functions":
+ library.Functions, err = cmd.readFunctions(rd)
+ case "library_code":
+ library.Code, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ libraries[i] = library
+ }
+ cmd.val = libraries
+ return nil
+}
+
+func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ functions := make([]Function, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function := Function{}
+ for f := 0; f < nn; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ if function.Name, err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ case "description":
+ if function.Description, err = rd.ReadString(); err != nil && err != Nil {
+ return nil, err
+ }
+ case "flags":
+ // Flags arrive as a RESP set reply.
+ nx, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ function.Flags = make([]string, nx)
+ for j := 0; j < nx; j++ {
+ if function.Flags[j], err = rd.ReadString(); err != nil {
+ return nil, err
+ }
+ }
+ default:
+ return nil, fmt.Errorf("redis: function list unexpected key %s", key)
+ }
+ }
+
+ functions[i] = function
+ }
+ return functions, nil
+}
+
+// FunctionStats contains information about the scripts currently executing on the server and the available engines.
+// - Engines:
+// Statistics about each engine, such as its number of functions and libraries.
+// - RunningScript:
+// The script currently running on the shard we're connected to.
+// For Redis Enterprise and Redis Cloud, this represents the
+// function with the longest running time across all running functions on all shards.
+// - RunningScripts:
+// All scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise.
+type FunctionStats struct {
+ Engines []Engine
+ isRunning bool
+ rs RunningScript
+ allrs []RunningScript
+}
+
+func (fs *FunctionStats) Running() bool {
+ return fs.isRunning
+}
+
+func (fs *FunctionStats) RunningScript() (RunningScript, bool) {
+ return fs.rs, fs.isRunning
+}
+
+// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database.
+// Only available on Redis Enterprise
+func (fs *FunctionStats) AllRunningScripts() []RunningScript {
+ return fs.allrs
+}
+
+type RunningScript struct {
+ Name string
+ Command []string
+ Duration time.Duration
+}
+
+type Engine struct {
+ Language string
+ LibrariesCount int64
+ FunctionsCount int64
+}
+
+type FunctionStatsCmd struct {
+ baseCmd
+ val FunctionStats
+}
+
+var _ Cmder = (*FunctionStatsCmd)(nil)
+
+func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd {
+ return &FunctionStatsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) {
+ cmd.val = val
+}
+
+func (cmd *FunctionStatsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FunctionStatsCmd) Val() FunctionStats {
+ return cmd.val
+}
+
+func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result FunctionStats
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "running_script":
+ result.rs, result.isRunning, err = cmd.readRunningScript(rd)
+ case "engines":
+ result.Engines, err = cmd.readEngines(rd)
+ case "all_running_scripts": // Redis Enterprise only
+ result.allrs, result.isRunning, err = cmd.readRunningScripts(rd)
+ default:
+ return fmt.Errorf("redis: function stats unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) {
+ err := rd.ReadFixedMapLen(3)
+ if err != nil {
+ if err == Nil {
+ return RunningScript{}, false, nil
+ }
+ return RunningScript{}, false, err
+ }
+
+ var runningScript RunningScript
+ for i := 0; i < 3; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+
+ switch key {
+ case "name":
+ runningScript.Name, err = rd.ReadString()
+ case "duration_ms":
+ runningScript.Duration, err = cmd.readDuration(rd)
+ case "command":
+ runningScript.Command, err = cmd.readCommand(rd)
+ default:
+ return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key)
+ }
+
+ if err != nil {
+ return RunningScript{}, false, err
+ }
+ }
+
+ return runningScript, true, nil
+}
+
+func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return nil, err
+ }
+
+ engines := make([]Engine, 0, n)
+ for i := 0; i < n; i++ {
+ engine := Engine{}
+ engine.Language, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ err = rd.ReadFixedMapLen(2)
+ if err != nil {
+ return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language)
+ }
+
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ switch key {
+ case "libraries_count":
+ engine.LibrariesCount, err = rd.ReadInt()
+ case "functions_count":
+ engine.FunctionsCount, err = rd.ReadInt()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ engines = append(engines, engine)
+ }
+ return engines, nil
+}
+
+func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) {
+ t, err := rd.ReadInt()
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(t) * time.Millisecond, nil
+}
+
+func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ command := make([]string, 0, n)
+ for i := 0; i < n; i++ {
+ x, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ command = append(command, x)
+ }
+
+ return command, nil
+}
+
+func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, false, err
+ }
+
+ runningScripts := make([]RunningScript, 0, n)
+ for i := 0; i < n; i++ {
+ rs, _, err := cmd.readRunningScript(rd)
+ if err != nil {
+ return nil, false, err
+ }
+ runningScripts = append(runningScripts, rs)
+ }
+
+ return runningScripts, len(runningScripts) > 0, nil
+}
+
+//------------------------------------------------------------------------------
+
+// LCSQuery is a parameter used for the LCS command
+type LCSQuery struct {
+ Key1 string
+ Key2 string
+ Len bool
+ Idx bool
+ MinMatchLen int
+ WithMatchLen bool
+}
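+
+// A minimal usage sketch (not part of the original source):
+//
+//	cmd := NewLCSCmd(ctx, &LCSQuery{Key1: "a", Key2: "b", Idx: true})
+//
+// requests index information, so the reply is parsed into the Matches
+// field of LCSMatch below.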
+
+// LCSMatch is the result set of the LCS command.
+type LCSMatch struct {
+ MatchString string
+ Matches []LCSMatchedPosition
+ Len int64
+}
+
+type LCSMatchedPosition struct {
+ Key1 LCSPosition
+ Key2 LCSPosition
+
+ // MatchLen is set only when WithMatchLen is true.
+ MatchLen int64
+}
+
+type LCSPosition struct {
+ Start int64
+ End int64
+}
+
+type LCSCmd struct {
+ baseCmd
+
+ // readType selects how the reply is parsed:
+ // 1: match string
+ // 2: match len
+ // 3: match idx (full LCSMatch)
+ readType uint8
+ val *LCSMatch
+}
+
+func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd {
+ args := make([]interface{}, 3, 7)
+ args[0] = "lcs"
+ args[1] = q.Key1
+ args[2] = q.Key2
+
+ cmd := &LCSCmd{readType: 1}
+ if q.Len {
+ cmd.readType = 2
+ args = append(args, "len")
+ } else if q.Idx {
+ cmd.readType = 3
+ args = append(args, "idx")
+ if q.MinMatchLen != 0 {
+ args = append(args, "minmatchlen", q.MinMatchLen)
+ }
+ if q.WithMatchLen {
+ args = append(args, "withmatchlen")
+ }
+ }
+ cmd.baseCmd = baseCmd{
+ ctx: ctx,
+ args: args,
+ }
+
+ return cmd
+}
+
+func (cmd *LCSCmd) SetVal(val *LCSMatch) {
+ cmd.val = val
+}
+
+func (cmd *LCSCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *LCSCmd) Val() *LCSMatch {
+ return cmd.val
+}
+
+func (cmd *LCSCmd) Result() (*LCSMatch, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) {
+ lcs := &LCSMatch{}
+ switch cmd.readType {
+ case 1:
+ // match string
+ if lcs.MatchString, err = rd.ReadString(); err != nil {
+ return err
+ }
+ case 2:
+ // match len
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ case 3:
+ // read LCSMatch
+ if err = rd.ReadFixedMapLen(2); err != nil {
+ return err
+ }
+
+ // read matches or len field
+ for i := 0; i < 2; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "matches":
+ // read array of matched positions
+ if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil {
+ return err
+ }
+ case "len":
+ // read match length
+ if lcs.Len, err = rd.ReadInt(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ cmd.val = lcs
+ return nil
+}
+
+func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ positions := make([]LCSMatchedPosition, n)
+ for i := 0; i < n; i++ {
+ pn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ if positions[i].Key1, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+ if positions[i].Key2, err = cmd.readPosition(rd); err != nil {
+ return nil, err
+ }
+
+ // read match length if WithMatchLen is true
+ if pn > 2 {
+ if positions[i].MatchLen, err = rd.ReadInt(); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return positions, nil
+}
+
+func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) {
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return pos, err
+ }
+ if pos.Start, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+ if pos.End, err = rd.ReadInt(); err != nil {
+ return pos, err
+ }
+
+ return pos, nil
+}
+
+// ------------------------------------------------------------------------
+
+type KeyFlags struct {
+ Key string
+ Flags []string
+}
+
+type KeyFlagsCmd struct {
+ baseCmd
+
+ val []KeyFlags
+}
+
+var _ Cmder = (*KeyFlagsCmd)(nil)
+
+func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd {
+ return &KeyFlagsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) {
+ cmd.val = val
+}
+
+func (cmd *KeyFlagsCmd) Val() []KeyFlags {
+ return cmd.val
+}
+
+func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *KeyFlagsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ if n == 0 {
+ cmd.val = make([]KeyFlags, 0)
+ return nil
+ }
+
+ cmd.val = make([]KeyFlags, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+
+ if err = rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ if cmd.val[i].Key, err = rd.ReadString(); err != nil {
+ return err
+ }
+ flagsLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Flags = make([]string, flagsLen)
+
+ for j := 0; j < flagsLen; j++ {
+ if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ---------------------------------------------------------------------------------------------------
+
+type ClusterLink struct {
+ Direction string
+ Node string
+ CreateTime int64
+ Events string
+ SendBufferAllocated int64
+ SendBufferUsed int64
+}
+
+type ClusterLinksCmd struct {
+ baseCmd
+
+ val []ClusterLink
+}
+
+var _ Cmder = (*ClusterLinksCmd)(nil)
+
+func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd {
+ return &ClusterLinksCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) {
+ cmd.val = val
+}
+
+func (cmd *ClusterLinksCmd) Val() []ClusterLink {
+ return cmd.val
+}
+
+func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterLinksCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterLink, n)
+
+ for i := 0; i < len(cmd.val); i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "direction":
+ cmd.val[i].Direction, err = rd.ReadString()
+ case "node":
+ cmd.val[i].Node, err = rd.ReadString()
+ case "create-time":
+ cmd.val[i].CreateTime, err = rd.ReadInt()
+ case "events":
+ cmd.val[i].Events, err = rd.ReadString()
+ case "send-buffer-allocated":
+ cmd.val[i].SendBufferAllocated, err = rd.ReadInt()
+ case "send-buffer-used":
+ cmd.val[i].SendBufferUsed, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ------------------------------------------------------------------------------------------------------------------
+
+type SlotRange struct {
+ Start int64
+ End int64
+}
+
+type Node struct {
+ ID string
+ Endpoint string
+ IP string
+ Hostname string
+ Port int64
+ TLSPort int64
+ Role string
+ ReplicationOffset int64
+ Health string
+}
+
+type ClusterShard struct {
+ Slots []SlotRange
+ Nodes []Node
+}
+
+type ClusterShardsCmd struct {
+ baseCmd
+
+ val []ClusterShard
+}
+
+var _ Cmder = (*ClusterShardsCmd)(nil)
+
+func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd {
+ return &ClusterShardsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) {
+ cmd.val = val
+}
+
+func (cmd *ClusterShardsCmd) Val() []ClusterShard {
+ return cmd.val
+}
+
+func (cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClusterShardsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]ClusterShard, n)
+
+ for i := 0; i < n; i++ {
+ m, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for j := 0; j < m; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "slots":
+ l, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
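+ // Slot ranges arrive as a flat list of start/end pairs.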
+ for k := 0; k < l; k += 2 {
+ start, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ end, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end})
+ }
+ case "nodes":
+ nodesLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Nodes = make([]Node, nodesLen)
+ for k := 0; k < nodesLen; k++ {
+ nodeMapLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ for l := 0; l < nodeMapLen; l++ {
+ nodeKey, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch nodeKey {
+ case "id":
+ cmd.val[i].Nodes[k].ID, err = rd.ReadString()
+ case "endpoint":
+ cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString()
+ case "ip":
+ cmd.val[i].Nodes[k].IP, err = rd.ReadString()
+ case "hostname":
+ cmd.val[i].Nodes[k].Hostname, err = rd.ReadString()
+ case "port":
+ cmd.val[i].Nodes[k].Port, err = rd.ReadInt()
+ case "tls-port":
+ cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt()
+ case "role":
+ cmd.val[i].Nodes[k].Role, err = rd.ReadString()
+ case "replication-offset":
+ cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt()
+ case "health":
+ cmd.val[i].Nodes[k].Health, err = rd.ReadString()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+ default:
+ return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key)
+ }
+ }
+ }
+
+ return nil
+}
+
+// -----------------------------------------
+
+type RankScore struct {
+ Rank int64
+ Score float64
+}
+
+type RankWithScoreCmd struct {
+ baseCmd
+
+ val RankScore
+}
+
+var _ Cmder = (*RankWithScoreCmd)(nil)
+
+func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd {
+ return &RankWithScoreCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *RankWithScoreCmd) SetVal(val RankScore) {
+ cmd.val = val
+}
+
+func (cmd *RankWithScoreCmd) Val() RankScore {
+ return cmd.val
+}
+
+func (cmd *RankWithScoreCmd) Result() (RankScore, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *RankWithScoreCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error {
+ if err := rd.ReadFixedArrayLen(2); err != nil {
+ return err
+ }
+
+ rank, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+
+ score, err := rd.ReadFloat()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = RankScore{Rank: rank, Score: score}
+
+ return nil
+}
+
+// --------------------------------------------------------------------------------------------------
+
+// ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0)
+type ClientFlags uint64
+
+const (
+ ClientSlave ClientFlags = 1 << 0 /* This client is a replica */
+ ClientMaster ClientFlags = 1 << 1 /* This client is a master */
+ ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */
+ ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */
+ ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */
+ ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */
+ ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */
+ ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */
+ ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */
+ ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */
+ ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */
+ ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */
+ ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */
+ ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */
+ ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */
+ ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */
+ ClientPrePSync ClientFlags = 1 << 16 /* Instance doesn't understand PSYNC. */
+ ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */
+ ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */
+ ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */
+ ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */
+ ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp
+ ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a write handler is not yet installed. */
+ ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */
+ ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */
+ ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */
+ ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */
+ ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */
+ ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */
+ ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */
+ ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently handling a command;
+ usually this is set only during call(), but blocked clients
+ may keep this flag until they retry processing the command. */
+ ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully parsed command ready for execution. */
+ ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */
+ ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */
+ ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */
+ ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */
+ ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */
+ ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */
+ ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/
+ ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */
+ ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */
+ ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands and writing the entire reply. */
+ ClientDenyBlocking ClientFlags = 1 << 41 /* Indicates that the client should not be blocked. Currently set inside MULTI, Lua, RM_Call, and the AOF client. */
+ ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */
+ ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */
+ ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */
+ ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */
+ ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */
+)
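+
+// The flags form a bit set; a hedged usage sketch (not part of the
+// original source):
+//
+//	if info.Flags&ClientReadOnly != 0 {
+//		// cluster client is in read-only mode
+//	}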
+
+// ClientInfo is redis-server ClientInfo, not go-redis *Client
+type ClientInfo struct {
+ ID int64 // redis version 2.8.12, a unique 64-bit client ID
+ Addr string // address/port of the client
+ LAddr string // address/port of local address client connected to (bind address)
+ FD int64 // file descriptor corresponding to the socket
+ Name string // the name set by the client with CLIENT SETNAME
+ Age time.Duration // total duration of the connection in seconds
+ Idle time.Duration // idle time of the connection in seconds
+ Flags ClientFlags // client flags (see below)
+ DB int // current database ID
+ Sub int // number of channel subscriptions
+ PSub int // number of pattern matching subscriptions
+ SSub int // redis version 7.0.3, number of shard channel subscriptions
+ Multi int // number of commands in a MULTI/EXEC context
+ QueryBuf int // qbuf, query buffer length (0 means no query pending)
+ QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full)
+ ArgvMem int // incomplete arguments for the next command (already extracted from query buffer)
+ MultiMem int // redis version 7.0, memory used by buffered MULTI commands
+ BufferSize int // rbs, usable size of buffer
+ BufferPeak int // rbp, peak used size of buffer in last 5 sec interval
+ OutputBufferLength int // obl, output buffer length
+ OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full)
+ OutputMemory int // omem, output buffer memory usage
+ TotalMemory int // tot-mem, total memory consumed by this client in its various buffers
+ Events string // file descriptor events (see below)
+ LastCmd string // cmd, last command played
+ User string // the authenticated username of the client
+ Redir int64 // client id of current client tracking redirection
+ Resp int // redis version 7.0, client RESP protocol version
+ LibName string // redis version 7.2, client library name
+ LibVer string // redis version 7.2, client library version
+}
+
+type ClientInfoCmd struct {
+ baseCmd
+
+ val *ClientInfo
+}
+
+var _ Cmder = (*ClientInfoCmd)(nil)
+
+func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd {
+ return &ClientInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) {
+ cmd.val = val
+}
+
+func (cmd *ClientInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClientInfoCmd) Val() *ClientInfo {
+ return cmd.val
+}
+
+func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) {
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ // sds o = catClientInfoString(sdsempty(), c);
+ // o = sdscatlen(o,"\n",1);
+ // addReplyVerbatim(c,o,sdslen(o),"txt");
+ // sdsfree(o);
+ cmd.val, err = parseClientInfo(strings.TrimSpace(txt))
+ return err
+}
+
+// fmt.Sscanf() cannot handle null values
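+// A typical (abridged) input line looks like:
+//
+//	id=3 addr=127.0.0.1:56788 laddr=127.0.0.1:6379 fd=8 name= age=0 idle=0 flags=N db=0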
+func parseClientInfo(txt string) (info *ClientInfo, err error) {
+ info = &ClientInfo{}
+ for _, s := range strings.Split(txt, " ") {
+ kv := strings.Split(s, "=")
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("redis: unexpected client info data (%s)", s)
+ }
+ key, val := kv[0], kv[1]
+
+ switch key {
+ case "id":
+ info.ID, err = strconv.ParseInt(val, 10, 64)
+ case "addr":
+ info.Addr = val
+ case "laddr":
+ info.LAddr = val
+ case "fd":
+ info.FD, err = strconv.ParseInt(val, 10, 64)
+ case "name":
+ info.Name = val
+ case "age":
+ var age int
+ if age, err = strconv.Atoi(val); err == nil {
+ info.Age = time.Duration(age) * time.Second
+ }
+ case "idle":
+ var idle int
+ if idle, err = strconv.Atoi(val); err == nil {
+ info.Idle = time.Duration(idle) * time.Second
+ }
+ case "flags":
+ if val == "N" {
+ break
+ }
+
+ for i := 0; i < len(val); i++ {
+ switch val[i] {
+ case 'S':
+ info.Flags |= ClientSlave
+ case 'O':
+ info.Flags |= ClientSlave | ClientMonitor
+ case 'M':
+ info.Flags |= ClientMaster
+ case 'P':
+ info.Flags |= ClientPubSub
+ case 'x':
+ info.Flags |= ClientMulti
+ case 'b':
+ info.Flags |= ClientBlocked
+ case 't':
+ info.Flags |= ClientTracking
+ case 'R':
+ info.Flags |= ClientTrackingBrokenRedir
+ case 'B':
+ info.Flags |= ClientTrackingBCAST
+ case 'd':
+ info.Flags |= ClientDirtyCAS
+ case 'c':
+ info.Flags |= ClientCloseAfterCommand
+ case 'u':
+ info.Flags |= ClientUnBlocked
+ case 'A':
+ info.Flags |= ClientCloseASAP
+ case 'U':
+ info.Flags |= ClientUnixSocket
+ case 'r':
+ info.Flags |= ClientReadOnly
+ case 'e':
+ info.Flags |= ClientNoEvict
+ case 'T':
+ info.Flags |= ClientNoTouch
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i]))
+ }
+ }
+ case "db":
+ info.DB, err = strconv.Atoi(val)
+ case "sub":
+ info.Sub, err = strconv.Atoi(val)
+ case "psub":
+ info.PSub, err = strconv.Atoi(val)
+ case "ssub":
+ info.SSub, err = strconv.Atoi(val)
+ case "multi":
+ info.Multi, err = strconv.Atoi(val)
+ case "qbuf":
+ info.QueryBuf, err = strconv.Atoi(val)
+ case "qbuf-free":
+ info.QueryBufFree, err = strconv.Atoi(val)
+ case "argv-mem":
+ info.ArgvMem, err = strconv.Atoi(val)
+ case "multi-mem":
+ info.MultiMem, err = strconv.Atoi(val)
+ case "rbs":
+ info.BufferSize, err = strconv.Atoi(val)
+ case "rbp":
+ info.BufferPeak, err = strconv.Atoi(val)
+ case "obl":
+ info.OutputBufferLength, err = strconv.Atoi(val)
+ case "oll":
+ info.OutputListLength, err = strconv.Atoi(val)
+ case "omem":
+ info.OutputMemory, err = strconv.Atoi(val)
+ case "tot-mem":
+ info.TotalMemory, err = strconv.Atoi(val)
+ case "events":
+ info.Events = val
+ case "cmd":
+ info.LastCmd = val
+ case "user":
+ info.User = val
+ case "redir":
+ info.Redir, err = strconv.ParseInt(val, 10, 64)
+ case "resp":
+ info.Resp, err = strconv.Atoi(val)
+ case "lib-name":
+ info.LibName = val
+ case "lib-ver":
+ info.LibVer = val
+ default:
+ return nil, fmt.Errorf("redis: unexpected client info key(%s)", key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return info, nil
+}
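+
+// Illustrative sketch (not part of the upstream source): parseClientInfo
+// maps each key=value pair of a raw CLIENT INFO line onto ClientInfo
+// fields; the input below is a made-up example:
+//
+//	info, _ := parseClientInfo("id=3 addr=127.0.0.1:1234 fd=8 name= age=5 idle=0 flags=N db=0")
+//	// info.ID == 3, info.Age == 5*time.Second, info.Flags == 0 (flags=N)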
+
+// -------------------------------------------
+
+type ACLLogEntry struct {
+ Count int64
+ Reason string
+ Context string
+ Object string
+ Username string
+ AgeSeconds float64
+ ClientInfo *ClientInfo
+ EntryID int64
+ TimestampCreated int64
+ TimestampLastUpdated int64
+}
+
+type ACLLogCmd struct {
+ baseCmd
+
+ val []*ACLLogEntry
+}
+
+var _ Cmder = (*ACLLogCmd)(nil)
+
+func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd {
+ return &ACLLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) {
+ cmd.val = val
+}
+
+func (cmd *ACLLogCmd) Val() []*ACLLogEntry {
+ return cmd.val
+}
+
+func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ACLLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]*ACLLogEntry, n)
+ for i := 0; i < n; i++ {
+ cmd.val[i] = &ACLLogEntry{}
+ entry := cmd.val[i]
+ respLen, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ for j := 0; j < respLen; j++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "count":
+ entry.Count, err = rd.ReadInt()
+ case "reason":
+ entry.Reason, err = rd.ReadString()
+ case "context":
+ entry.Context, err = rd.ReadString()
+ case "object":
+ entry.Object, err = rd.ReadString()
+ case "username":
+ entry.Username, err = rd.ReadString()
+ case "age-seconds":
+ entry.AgeSeconds, err = rd.ReadFloat()
+ case "client-info":
+ txt, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt))
+ if err != nil {
+ return err
+ }
+ case "entry-id":
+ entry.EntryID, err = rd.ReadInt()
+ case "timestamp-created":
+ entry.TimestampCreated, err = rd.ReadInt()
+ case "timestamp-last-updated":
+ entry.TimestampLastUpdated, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
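+
+// Illustrative sketch, assuming a *Client rdb, a context ctx, and the
+// ACLLog command exposed by ACLCmdable:
+//
+//	entries, err := rdb.ACLLog(ctx, 10).Result()
+//	if err == nil {
+//		for _, e := range entries {
+//			fmt.Println(e.Username, e.Reason, e.Count)
+//		}
+//	}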
+
+// LibraryInfo holds the library info.
+type LibraryInfo struct {
+ LibName *string
+ LibVer *string
+}
+
+// WithLibraryName returns a valid LibraryInfo with library name only.
+func WithLibraryName(libName string) LibraryInfo {
+ return LibraryInfo{LibName: &libName}
+}
+
+// WithLibraryVersion returns a valid LibraryInfo with library version only.
+func WithLibraryVersion(libVer string) LibraryInfo {
+ return LibraryInfo{LibVer: &libVer}
+}
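+
+// Illustrative sketch (caller's perspective, rdb and ctx assumed):
+// CLIENT SETINFO accepts one attribute per call, so the two constructors
+// are used separately:
+//
+//	_ = rdb.Conn().ClientSetInfo(ctx, redis.WithLibraryName("mylib"))
+//	_ = rdb.Conn().ClientSetInfo(ctx, redis.WithLibraryVersion("1.2.3"))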
+
+// -------------------------------------------
+
+type InfoCmd struct {
+ baseCmd
+ val map[string]map[string]string
+}
+
+var _ Cmder = (*InfoCmd)(nil)
+
+func NewInfoCmd(ctx context.Context, args ...interface{}) *InfoCmd {
+ return &InfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *InfoCmd) SetVal(val map[string]map[string]string) {
+ cmd.val = val
+}
+
+func (cmd *InfoCmd) Val() map[string]map[string]string {
+ return cmd.val
+}
+
+func (cmd *InfoCmd) Result() (map[string]map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *InfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *InfoCmd) readReply(rd *proto.Reader) error {
+ val, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ section := ""
+ scanner := bufio.NewScanner(strings.NewReader(val))
+ moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "#") {
+ if cmd.val == nil {
+ cmd.val = make(map[string]map[string]string)
+ }
+ section = strings.TrimPrefix(line, "# ")
+ cmd.val[section] = make(map[string]string)
+ } else if line != "" {
+ if section == "Modules" {
+ kv := moduleRe.FindStringSubmatch(line)
+ if len(kv) == 3 {
+ cmd.val[section][kv[1]] = kv[2]
+ }
+ } else {
+ kv := strings.SplitN(line, ":", 2)
+ if len(kv) == 2 {
+ cmd.val[section][kv[0]] = kv[1]
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (cmd *InfoCmd) Item(section, key string) string {
+ if cmd.val == nil {
+ return ""
+ } else if cmd.val[section] == nil {
+ return ""
+ } else {
+ return cmd.val[section][key]
+ }
+}
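+
+// Illustrative sketch: Item is a nil-safe lookup into the parsed
+// section/key map, e.g. for a hypothetical *InfoCmd cmd whose reply
+// contained a "# Server" section:
+//
+//	version := cmd.Item("Server", "redis_version") // "" when absent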
+
+type MonitorStatus int
+
+const (
+ monitorStatusIdle MonitorStatus = iota
+ monitorStatusStart
+ monitorStatusStop
+)
+
+type MonitorCmd struct {
+ baseCmd
+ ch chan string
+ status MonitorStatus
+ mu sync.Mutex
+}
+
+func newMonitorCmd(ctx context.Context, ch chan string) *MonitorCmd {
+ return &MonitorCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"monitor"},
+ },
+ ch: ch,
+ status: monitorStatusIdle,
+ mu: sync.Mutex{},
+ }
+}
+
+func (cmd *MonitorCmd) String() string {
+ return cmdString(cmd, nil)
+}
+
+func (cmd *MonitorCmd) readReply(rd *proto.Reader) error {
+ ctx, cancel := context.WithCancel(cmd.ctx)
+ go func(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ err := cmd.readMonitor(rd, cancel)
+ if err != nil {
+ cmd.err = err
+ return
+ }
+ }
+ }
+ }(ctx)
+ return nil
+}
+
+func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) error {
+ for {
+ cmd.mu.Lock()
+ st := cmd.status
+ cmd.mu.Unlock()
+ if pk, _ := rd.Peek(1); len(pk) != 0 && st == monitorStatusStart {
+ line, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.ch <- line
+ }
+ if st == monitorStatusStop {
+ cancel()
+ break
+ }
+ }
+ return nil
+}
+
+func (cmd *MonitorCmd) Start() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStart
+}
+
+func (cmd *MonitorCmd) Stop() {
+ cmd.mu.Lock()
+ defer cmd.mu.Unlock()
+ cmd.status = monitorStatusStop
+}
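+
+// Illustrative sketch, assuming the client exposes a Monitor method that
+// wires a MonitorCmd to the given channel (rdb and ctx assumed):
+//
+//	ch := make(chan string)
+//	mon := rdb.Monitor(ctx, ch)
+//	mon.Start()
+//	fmt.Println(<-ch) // one raw MONITOR line
+//	mon.Stop()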
diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go
new file mode 100644
index 0000000000..db59594469
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/commands.go
@@ -0,0 +1,718 @@
+package redis
+
+import (
+ "context"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+)
+
+// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+// otherwise you will receive an error: (error) ERR syntax error.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+ dur, time.Millisecond,
+ )
+ return 1
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
+ dur, time.Second,
+ )
+ return 1
+ }
+ return int64(dur / time.Second)
+}
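+
+// Illustrative sketch of the truncation rules above: durations below the
+// server's resolution are clamped to the minimal supported value (and a
+// warning is logged), everything else is integer-divided:
+//
+//	formatMs(ctx, 500*time.Microsecond)  // 1
+//	formatMs(ctx, 2*time.Second)         // 2000
+//	formatSec(ctx, 500*time.Millisecond) // 1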
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case map[string]string:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP:
+ return append(dst, arg)
+ default:
+ // scan struct field
+ v := reflect.ValueOf(arg)
+ if v.Type().Kind() == reflect.Ptr {
+ if v.IsNil() {
+ // error: arg is not a valid object
+ return dst
+ }
+ v = v.Elem()
+ }
+
+ if v.Type().Kind() == reflect.Struct {
+ return appendStructField(dst, v)
+ }
+
+ return append(dst, arg)
+ }
+}
+
+// appendStructField appends the field names and values held by the structure v to dst, and returns the appended dst.
+func appendStructField(dst []interface{}, v reflect.Value) []interface{} {
+ typ := v.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ tag := typ.Field(i).Tag.Get("redis")
+ if tag == "" || tag == "-" {
+ continue
+ }
+ name, opt, _ := strings.Cut(tag, ",")
+ if name == "" {
+ continue
+ }
+
+ field := v.Field(i)
+
+ // skip the field when omitempty is set and the value is empty
+ if omitEmpty(opt) && isEmptyValue(field) {
+ continue
+ }
+
+ if field.CanInterface() {
+ dst = append(dst, name, field.Interface())
+ }
+ }
+
+ return dst
+}
+
+func omitEmpty(opt string) bool {
+ for opt != "" {
+ var name string
+ name, opt, _ = strings.Cut(opt, ",")
+ if name == "omitempty" {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Pointer:
+ return v.IsNil()
+ }
+ return false
+}
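+
+// Illustrative sketch: appendArg expands a tagged struct into alternating
+// name/value args, honoring the "redis" tag and omitempty; the type below
+// is hypothetical:
+//
+//	type point struct {
+//		X int `redis:"x"`
+//		Y int `redis:"y,omitempty"`
+//	}
+//
+//	appendArg(nil, point{X: 1}) // []interface{}{"x", 1} (Y is omitted)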
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command(ctx context.Context) *CommandsInfoCmd
+ CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd
+ CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd
+ CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientInfo(ctx context.Context) *ClientInfoCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientUnpause(ctx context.Context) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ClientUnblock(ctx context.Context, id int64) *IntCmd
+ ClientUnblockWithError(ctx context.Context, id int64) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ SlowLogGet(ctx context.Context, num int64) *SlowLogCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd
+
+ ACLCmdable
+ BitMapCmdable
+ ClusterCmdable
+ GearsCmdable
+ GenericCmdable
+ GeoCmdable
+ HashCmdable
+ HyperLogLogCmdable
+ ListCmdable
+ ProbabilisticCmdable
+ PubSubCmdable
+ ScriptingFunctionsCmdable
+ SetCmdable
+ SortedSetCmdable
+ StringCmdable
+ StreamCmdable
+ TimeseriesCmdable
+ JSONCmdable
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+ ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd
+ Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection as one of the users defined in the ACL list
+// when connecting to a Redis 6.0 or greater instance that is using the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) WaitAOF(ctx context.Context, numLocal, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "waitAOF", numLocal, numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetInfo sends a CLIENT SETINFO command with the provided info.
+func (c statefulCmdable) ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd {
+ err := info.Validate()
+ if err != nil {
+ panic(err.Error())
+ }
+
+ var cmd *StatusCmd
+ if info.LibName != nil {
+ libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, internal.ReplaceSpaces(runtime.Version()))
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-NAME", libName)
+ } else {
+ cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-VER", *info.LibVer)
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Validate checks if only one field in the struct is non-nil.
+func (info LibraryInfo) Validate() error {
+ if info.LibName != nil && info.LibVer != nil {
+ return errors.New("both LibName and LibVer cannot be set at the same time")
+ }
+ if info.LibName == nil && info.LibVer == nil {
+ return errors.New("at least one of LibName and LibVer should be set")
+ }
+ return nil
+}
+
+// Hello sets the RESP protocol version and optionally authenticates the connection and sets its name.
+func (c statefulCmdable) Hello(ctx context.Context,
+ ver int, username, password, clientName string,
+) *MapStringInterfaceCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "hello", ver)
+ if password != "" {
+ if username != "" {
+ args = append(args, "auth", username, password)
+ } else {
+ args = append(args, "auth", "default", password)
+ }
+ }
+ if clientName != "" {
+ args = append(args, "setname", clientName)
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FilterBy is used for the `CommandList` command parameter.
+type FilterBy struct {
+ Module string
+ ACLCat string
+ Pattern string
+}
+
+func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd {
+ args := make([]interface{}, 0, 5)
+ args = append(args, "command", "list")
+ if filter != nil {
+ if filter.Module != "" {
+ args = append(args, "filterby", "module", filter.Module)
+ } else if filter.ACLCat != "" {
+ args = append(args, "filterby", "aclcat", filter.ACLCat)
+ } else if filter.Pattern != "" {
+ args = append(args, "filterby", "pattern", filter.Pattern)
+ }
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
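+
+// Illustrative sketch (rdb and ctx assumed): the FILTERBY options are
+// mutually exclusive and the first non-empty field wins:
+//
+//	names, err := rdb.CommandList(ctx, &redis.FilterBy{ACLCat: "read"}).Result()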
+
+func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeys"
+ copy(args[2:], commands)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd {
+ args := make([]interface{}, 2+len(commands))
+ args[0] = "command"
+ args[1] = "getkeysandflags"
+ copy(args[2:], commands)
+ cmd := NewKeyFlagsCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
+ panic("not implemented")
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+// Scheme is required; two connection types are supported:
+//
+//	redis://<user>:<password>@<host>:<port>/<db_number>
+//	unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Examples:
+//
+// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+// is equivalent to:
+// &Options{
+// Network: "tcp",
+// Addr: "localhost:6789",
+// DB: 1, // path "/3" was overridden by "&db=1"
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// MaxRetries: 2,
+// }
+func ParseURL(redisURL string) (*Options, error) {
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ switch u.Scheme {
+ case "redis", "rediss":
+ return setupTCPConn(u)
+ case "unix":
+ return setupUnixConn(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+}
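+
+// Illustrative sketch: the returned Options can be passed straight to
+// NewClient:
+//
+//	opt, err := redis.ParseURL("redis://user:pass@localhost:6379/1?dial_timeout=3s")
+//	if err != nil {
+//		panic(err)
+//	}
+//	rdb := redis.NewClient(opt)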
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+ o := &Options{Network: "tcp"}
+
+ o.Username, o.Password = getUserPassword(u)
+
+ h, p := getHostPortWithDefaults(u)
+ o.Addr = net.JoinHostPort(h, p)
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ var err error
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+ }
+ default:
+ return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+ }
+
+ if u.Scheme == "rediss" {
+ o.TLSConfig = &tls.Config{
+ ServerName: h,
+ MinVersion: tls.VersionTLS12,
+ }
+ }
+
+ return setupConnParams(u, o)
+}
+
+// getHostPortWithDefaults is a helper function that splits the url into
+// a host and a port. If the host is missing, it defaults to localhost
+// and if the port is missing, it defaults to 6379.
+func getHostPortWithDefaults(u *url.URL) (string, string) {
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host = u.Host
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ if port == "" {
+ port = "6379"
+ }
+ return host, port
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+ o := &Options{
+ Network: "unix",
+ }
+
+ if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+ return nil, errors.New("redis: empty unix socket path")
+ }
+ o.Addr = u.Path
+ o.Username, o.Password = getUserPassword(u)
+ return setupConnParams(u, o)
+}
+
+type queryOptions struct {
+ q url.Values
+ err error
+}
+
+func (o *queryOptions) has(name string) bool {
+ return len(o.q[name]) > 0
+}
+
+func (o *queryOptions) string(name string) string {
+ vs := o.q[name]
+ if len(vs) == 0 {
+ return ""
+ }
+ delete(o.q, name) // enable detection of unknown parameters
+ return vs[len(vs)-1]
+}
+
+func (o *queryOptions) strings(name string) []string {
+ vs := o.q[name]
+ delete(o.q, name)
+ return vs
+}
+
+func (o *queryOptions) int(name string) int {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ i, err := strconv.Atoi(s)
+ if err == nil {
+ return i
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ // try plain number first
+ if i, err := strconv.Atoi(s); err == nil {
+ if i <= 0 {
+ // disable timeouts
+ return -1
+ }
+ return time.Duration(i) * time.Second
+ }
+ dur, err := time.ParseDuration(s)
+ if err == nil {
+ return dur
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+ }
+ return 0
+}
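+
+// Illustrative sketch of the rules above, using dial_timeout as an example
+// parameter:
+//
+//	dial_timeout=3   -> 3 * time.Second (plain integer = seconds)
+//	dial_timeout=3s  -> 3 * time.Second (time.ParseDuration syntax)
+//	dial_timeout=0   -> -1 (values <= 0 disable the timeout)
+//	dial_timeout=    -> 0  (empty value, the default applies)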
+
+func (o *queryOptions) bool(name string) bool {
+ switch s := o.string(name); s {
+ case "true", "1":
+ return true
+ case "false", "0", "":
+ return false
+ default:
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+ }
+ return false
+ }
+}
+
+func (o *queryOptions) remaining() []string {
+ if len(o.q) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(o.q))
+ for k := range o.q {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// setupConnParams converts query parameters in u to option value in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+ q := queryOptions{q: u.Query()}
+
+ // compat: a future major release may use q.int("db")
+ if tmp := q.string("db"); tmp != "" {
+ db, err := strconv.Atoi(tmp)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %w", err)
+ }
+ o.DB = db
+ }
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ if q.has("conn_max_idle_time") {
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+ } else {
+ o.ConnMaxIdleTime = q.duration("idle_timeout")
+ }
+ if q.has("conn_max_lifetime") {
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ } else {
+ o.ConnMaxLifetime = q.duration("max_conn_age")
+ }
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+ var user, password string
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ password = p
+ }
+ }
+ return user, password
+}
+
+func newConnPool(
+ opt *Options,
+ dialer func(ctx context.Context, network, addr string) (net.Conn, error),
+) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return dialer(ctx, opt.Network, opt.Addr)
+ },
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+ })
+}
diff --git a/vendor/github.com/redis/go-redis/v9/osscluster.go b/vendor/github.com/redis/go-redis/v9/osscluster.go
new file mode 100644
index 0000000000..17f98d9dc8
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/osscluster.go
@@ -0,0 +1,1913 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "net"
+ "net/url"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // NewClient creates a cluster node client with provided name and options.
+ NewClient func(opt *Options) *Client
+
+ // The maximum number of retries before giving up. Command is retried
+ // on network errors and MOVED/ASK redirects.
+ // Default is 3 retries.
+ MaxRedirects int
+
+ // Enables read-only commands on slave nodes.
+ ReadOnly bool
+ // Allows routing read-only commands to the closest master or slave node.
+ // It automatically enables ReadOnly.
+ RouteByLatency bool
+ // Allows routing read-only commands to the random master or slave node.
+ // It automatically enables ReadOnly.
+ RouteRandomly bool
+
+ // Optional function that returns cluster slots information.
+ // It is useful to manually create cluster of standalone Redis servers
+ // and load-balance read/write operations between master and slaves.
+ // It can use service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ CredentialsProvider func() (username string, password string)
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ PoolFIFO bool
+ PoolSize int // applies per cluster node and not for the whole cluster
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int // applies per cluster node and not for the whole cluster
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+ DisableIndentity bool // Disable set-lib on connect. Default is false.
+
+ IdentitySuffix string // Add suffix to client name. Default is empty.
+}
+
+func (opt *ClusterOptions) init() {
+ if opt.MaxRedirects == -1 {
+ opt.MaxRedirects = 0
+ } else if opt.MaxRedirects == 0 {
+ opt.MaxRedirects = 3
+ }
+
+ if opt.RouteByLatency || opt.RouteRandomly {
+ opt.ReadOnly = true
+ }
+
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
+ }
+
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
+ if opt.MaxRetries == 0 {
+ opt.MaxRetries = -1
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+
+ if opt.NewClient == nil {
+ opt.NewClient = NewClient
+ }
+}
+
+// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis.
+// The URL must be in the form:
+//
+// redis://<user>:<password>@<host>:<port>
+// or
+// rediss://<user>:<password>@<host>:<port>
+//
+// To add additional addresses, specify the query parameter "addr" one or more times, e.g.:
+//
+// redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+// or
+// rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3>
+//
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query parameters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+//
+// Example:
+//
+// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+// is equivalent to:
+// &ClusterOptions{
+// Addrs: ["localhost:6789", "localhost:6790", "localhost:6791"],
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// }
+func ParseClusterURL(redisURL string) (*ClusterOptions, error) {
+ o := &ClusterOptions{}
+
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ // add base URL to the array of addresses
+ // more addresses may be added through the URL params
+ h, p := getHostPortWithDefaults(u)
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+
+ // setup username, password, and other configurations
+ o, err = setupClusterConn(u, h, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
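+
+// Illustrative sketch: extra nodes are appended through repeated "addr"
+// query parameters:
+//
+//	opt, err := redis.ParseClusterURL("redis://user:pass@localhost:6789?addr=localhost:6790&addr=localhost:6791")
+//	if err != nil {
+//		panic(err)
+//	}
+//	rdb := redis.NewClusterClient(opt)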
+
+// setupClusterConn gets the username and password from the URL and the query parameters.
+func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) {
+ switch u.Scheme {
+ case "rediss":
+ o.TLSConfig = &tls.Config{ServerName: host}
+ fallthrough
+ case "redis":
+ o.Username, o.Password = getUserPassword(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+
+ // retrieve the configuration from the query parameters
+ o, err := setupClusterQueryParams(u, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
+
+// setupClusterQueryParams converts query parameters in u to option value in o.
+func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) {
+ q := queryOptions{q: u.Query()}
+
+ o.Protocol = q.int("protocol")
+ o.ClientName = q.string("client_name")
+ o.MaxRedirects = q.int("max_redirects")
+ o.ReadOnly = q.bool("read_only")
+ o.RouteByLatency = q.bool("route_by_latency")
+ o.RouteRandomly = q.bool("route_randomly")
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxIdleConns = q.int("max_idle_conns")
+ o.MaxActiveConns = q.int("max_active_conns")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.ConnMaxLifetime = q.duration("conn_max_lifetime")
+ o.ConnMaxIdleTime = q.duration("conn_max_idle_time")
+
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // addr can be specified as many times as needed
+ addrs := q.strings("addr")
+ for _, addr := range addrs {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil || h == "" || p == "" {
+ return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr)
+ }
+
+ o.Addrs = append(o.Addrs, net.JoinHostPort(h, p))
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ return &Options{
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+ CredentialsProvider: opt.CredentialsProvider,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ TLSConfig: opt.TLSConfig,
+ // If ClusterSlots is populated, then we probably have an artificial
+ // cluster whose nodes are not in clustering mode (otherwise there isn't
+ // much use for ClusterSlots config). This means we cannot execute the
+ // READONLY command against that node -- setting readOnly to false in such
+ // situations in the options below will prevent that from happening.
+ readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+
+ latency uint32 // atomic
+ generation uint32 // atomic
+ failing uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: clOpt.NewClient(opt),
+ }
+
+ node.latency = math.MaxUint32
+ if clOpt.RouteByLatency {
+ go node.updateLatency()
+ }
+
+ return &node
+}
+
+func (n *clusterNode) String() string {
+ return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+ return n.Client.Close()
+}
+
+func (n *clusterNode) updateLatency() {
+ const numProbe = 10
+ var dur uint64
+
+ successes := 0
+ for i := 0; i < numProbe; i++ {
+ time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
+
+ start := time.Now()
+ err := n.Client.Ping(context.TODO()).Err()
+ if err == nil {
+ dur += uint64(time.Since(start) / time.Microsecond)
+ successes++
+ }
+ }
+
+ var latency float64
+ if successes == 0 {
+ // If none of the pings worked, set latency to an arbitrarily high value so this node
+ // gets the least priority.
+ latency = float64((1 * time.Minute) / time.Microsecond)
+ } else {
+ latency = float64(dur) / float64(successes)
+ }
+ atomic.StoreUint32(&n.latency, uint32(latency+0.5))
+}
+
+func (n *clusterNode) Latency() time.Duration {
+ latency := atomic.LoadUint32(&n.latency)
+ return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+ atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+ const timeout = 15 // 15 seconds
+
+ failing := atomic.LoadUint32(&n.failing)
+ if failing == 0 {
+ return false
+ }
+ if time.Now().Unix()-int64(failing) < timeout {
+ return true
+ }
+ atomic.StoreUint32(&n.failing, 0)
+ return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+ return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+ for {
+ v := atomic.LoadUint32(&n.generation)
+ if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+ break
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ addrs []string
+ nodes map[string]*clusterNode
+ activeAddrs []string
+ closed bool
+ onNewNode []func(rdb *Client)
+
+ _generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+
+ addrs: opt.Addrs,
+ nodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.nodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.nodes = nil
+ c.activeAddrs = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+ var addrs []string
+
+ c.mu.RLock()
+ closed := c.closed //nolint:ifshort
+ if !closed {
+ if len(c.activeAddrs) > 0 {
+ addrs = c.activeAddrs
+ } else {
+ addrs = c.addrs
+ }
+ }
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+ return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+ //nolint:prealloc
+ var collected []*clusterNode
+
+ c.mu.Lock()
+
+ c.activeAddrs = c.activeAddrs[:0]
+ for addr, node := range c.nodes {
+ if node.Generation() >= generation {
+ c.activeAddrs = append(c.activeAddrs, addr)
+ if c.opt.RouteByLatency {
+ go node.updateLatency()
+ }
+ continue
+ }
+
+ delete(c.nodes, addr)
+ collected = append(collected, node)
+ }
+
+ c.mu.Unlock()
+
+ for _, node := range collected {
+ _ = node.Client.Close()
+ }
+}
+
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
+ node, err := c.get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok := c.nodes[addr]
+ if ok {
+ return node, nil
+ }
+
+ node = newClusterNode(c.opt, addr)
+ for _, fn := range c.onNewNode {
+ fn(node.Client)
+ }
+
+ c.addrs = appendIfNotExists(c.addrs, addr)
+ c.nodes[addr] = node
+
+ return node, nil
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var err error
+ c.mu.RLock()
+ if c.closed {
+ err = pool.ErrClosed
+ } else {
+ node = c.nodes[addr]
+ }
+ c.mu.RUnlock()
+ return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ cp := make([]*clusterNode, 0, len(c.nodes))
+ for _, node := range c.nodes {
+ cp = append(cp, node)
+ }
+ return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ n := rand.Intn(len(addrs))
+ return c.GetOrCreate(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+ nodes *clusterNodes
+ Masters []*clusterNode
+ Slaves []*clusterNode
+
+ slots []*clusterSlot
+
+ generation uint32
+ createdAt time.Time
+}
+
+func newClusterState(
+ nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+
+ slots: make([]*clusterSlot, 0, len(slots)),
+
+ generation: nodes.NextGeneration(),
+ createdAt: time.Now(),
+ }
+
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for i, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ node.SetGeneration(c.generation)
+ nodes = append(nodes, node)
+
+ if i == 0 {
+ c.Masters = appendUniqueNode(c.Masters, node)
+ } else {
+ c.Slaves = appendUniqueNode(c.Slaves, node)
+ }
+ }
+
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
+ }
+
+ sort.Sort(clusterSlotSlice(c.slots))
+
+ time.AfterFunc(time.Minute, func() {
+ nodes.GC(c.generation)
+ })
+
+ return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+ // Use origin host which is not loopback and node port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ if slave := nodes[1]; !slave.Failing() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Failing() {
+ return slave, nil
+ }
+ }
+
+ // All slaves are loading - use master.
+ return nodes[0], nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var node *clusterNode
+ for _, n := range nodes {
+ if n.Failing() {
+ continue
+ }
+ if node == nil || n.Latency() < node.Latency() {
+ node = n
+ }
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ // If all nodes are failing - return random node
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+ if len(nodes) == 1 {
+ return nodes[0], nil
+ }
+ randomNodes := rand.Perm(len(nodes))
+ for _, idx := range randomNodes {
+ if node := nodes[idx]; !node.Failing() {
+ return node, nil
+ }
+ }
+ return nodes[randomNodes[0]], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+ load func(ctx context.Context) (*clusterState, error)
+
+ state atomic.Value
+ reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
+ return &clusterStateHolder{
+ load: fn,
+ }
+}
+
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+ state, err := c.load(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.state.Store(state)
+ return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload() {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+ go func() {
+ defer atomic.StoreUint32(&c.reloading, 0)
+
+ _, err := c.Reload(context.Background())
+ if err != nil {
+ return
+ }
+ time.Sleep(200 * time.Millisecond)
+ }()
+}
+
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
+ v := c.state.Load()
+ if v == nil {
+ return c.Reload(ctx)
+ }
+
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > 10*time.Second {
+ c.LazyReload()
+ }
+ return state, nil
+}
+
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+ state, err := c.Reload(ctx)
+ if err == nil {
+ return state, nil
+ }
+ return c.Get(ctx)
+}
+
+//------------------------------------------------------------------------------
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder
+ cmdsInfoCache *cmdsInfoCache
+ cmdable
+ hooksMixin
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ opt.init()
+
+ c := &ClusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ }
+
+ c.state = newClusterStateHolder(c.loadState)
+ c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+ c.cmdable = c.Process
+
+ c.initHooks(hooks{
+ dial: nil,
+ process: c.process,
+ pipeline: c.processPipeline,
+ txPipeline: c.processTxPipeline,
+ })
+
+ return c
+}
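+
+// Illustrative sketch (caller's perspective):
+//
+//	rdb := redis.NewClusterClient(&redis.ClusterOptions{
+//		Addrs: []string{":7000", ":7001", ":7002"},
+//	})
+//	defer rdb.Close()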
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+ c.state.LazyReload()
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+ slot := c.cmdSlot(ctx, cmd)
+ var node *clusterNode
+ var ask bool
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ if node == nil {
+ var err error
+ node, err = c.cmdNode(ctx, cmd.Name(), slot)
+ if err != nil {
+ return err
+ }
+ }
+
+ if ask {
+ ask = false
+
+ pipe := node.Client.Pipeline()
+ _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+ _ = pipe.Process(ctx, cmd)
+ _, lastErr = pipe.Exec(ctx)
+ } else {
+ lastErr = node.Client.Process(ctx, cmd)
+ }
+
+ // If there is no error - we are done.
+ if lastErr == nil {
+ return nil
+ }
+ if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload()
+ }
+ node = nil
+ continue
+ }
+
+ // If slave is loading - pick another node.
+ if c.opt.ReadOnly && isLoadingError(lastErr) {
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ var moved bool
+ var addr string
+ moved, ask, addr = isMovedError(lastErr)
+ if moved || ask {
+ c.state.LazyReload()
+
+ var err error
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ // First retry the same node.
+ if attempt == 0 {
+ continue
+ }
+
+ // Second try another node.
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ return lastErr
+ }
+ return lastErr
+}
+
+func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) {
+ c.nodes.OnNewNode(fn)
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, master := range state.Masters {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, slave := range state.Slaves {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(slave)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachShard concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ worker := func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }
+
+ for _, node := range state.Masters {
+ wg.Add(1)
+ go worker(node)
+ }
+ for _, node := range state.Slaves {
+ wg.Add(1)
+ go worker(node)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
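+
+// Illustrative sketch (rdb and ctx assumed): the ForEach helpers fan out
+// concurrently and report only the first error:
+//
+//	err := rdb.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
+//		return shard.Ping(ctx).Err()
+//	})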
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ state, _ := c.state.Get(context.TODO())
+ if state == nil {
+ return &acc
+ }
+
+ for _, node := range state.Masters {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ for _, node := range state.Slaves {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ return &acc
+}
+
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ for _, idx := range rand.Perm(len(addrs)) {
+ addr := addrs[idx]
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ slots, err := node.Client.ClusterSlots(ctx).Result()
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+ }
+
+ /*
+ * No node is connectable. It's possible that all nodes' IPs have changed.
+ * Clear activeAddrs so the client can re-connect using the initial
+ * address list (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]),
+ * which may resolve the domain names again and pick up updated IP addresses.
+ */
+ c.nodes.mu.Lock()
+ c.nodes.activeAddrs = nil
+ c.nodes.mu.Unlock()
+
+ return nil, firstErr
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ cmdsMap := newCmdsMap()
+
+ if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap.m {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+ c.processPipelineNode(ctx, node, cmds, failedCmds)
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) {
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(ctx, cmd)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+ }
+
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(ctx, cmd)
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *ClusterClient) processPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
+ })
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ ctx context.Context,
+ node *clusterNode,
+ rd *proto.Reader,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ for i, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+
+ if err == nil {
+ continue
+ }
+
+ if c.checkMovedErr(ctx, cmd, err, failedCmds) {
+ continue
+ }
+
+ if c.opt.ReadOnly {
+ node.MarkAsFailing()
+ }
+
+ if !isRedisError(err) {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds[i+1:], err)
+ return err
+ }
+ }
+
+ if err := cmds[0].Err(); err != nil && shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ return err
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+ ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+ moved, ask, addr := isMovedError(err)
+ if !moved && !ask {
+ return false
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return false
+ }
+
+ if moved {
+ c.state.LazyReload()
+ failedCmds.Add(node, cmd)
+ return true
+ }
+
+ if ask {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ return true
+ }
+
+ panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsMap := c.mapCmdsBySlot(ctx, cmds)
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+ c.processTxPipelineNode(ctx, node, cmds, failedCmds)
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds.m
+ }
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder {
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(ctx, cmd)
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap
+}
+
+func (c *ClusterClient) processTxPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ cn, err := node.Client.getConn(ctx)
+ if err != nil {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var processErr error
+ defer func() {
+ node.Client.releaseConn(ctx, cn, processErr)
+ }()
+ processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds)
+
+ return processErr
+ })
+}
+
+func (c *ClusterClient) processTxPipelineNodeConn(
+ ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ if shouldRetry(err, true) {
+ _ = c.mapCmdsByNode(ctx, failedCmds, cmds)
+ }
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := c.txPipelineReadQueued(
+ ctx, rd, statusCmd, trimmedCmds, failedCmds,
+ ); err != nil {
+ setCmdsErr(cmds, err)
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds)
+ }
+
+ return err
+ }
+
+ return pipelineReadCmds(rd, trimmedCmds)
+ })
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+ ctx context.Context,
+ rd *proto.Reader,
+ statusCmd *StatusCmd,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(rd)
+ if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
+ continue
+ }
+ return err
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ if line[0] != proto.RespArray {
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+ ctx context.Context, cmds []Cmder,
+ moved, ask bool,
+ addr string,
+ failedCmds *cmdsMap,
+) error {
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+
+ if moved {
+ c.state.LazyReload()
+ for _, cmd := range cmds {
+ failedCmds.Add(node, cmd)
+ }
+ return nil
+ }
+
+ if ask {
+ for _, cmd := range cmds {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ }
+ return nil
+ }
+
+ return nil
+}
+
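+// Watch runs fn with the given keys WATCHed, enabling optimistic transactions.
+// In cluster mode all watched keys must hash to the same slot, which in
+// practice means multi-key transactions rely on hash tags: keys sharing the
+// same {tag} substring map to the same slot.
+//
+// A minimal usage sketch; the key name is illustrative and rdb is an assumed
+// *ClusterClient:
+//
+//	err := rdb.Watch(ctx, func(tx *Tx) error {
+//		n, err := tx.Get(ctx, "{user:1}:visits").Int()
+//		if err != nil && err != Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe Pipeliner) error {
+//			pipe.Set(ctx, "{user:1}:visits", n+1, 0)
+//			return nil
+//		})
+//		return err
+//	}, "{user:1}:visits")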
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ return err
+ }
+ }
+
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ err = node.Client.Watch(ctx, fn, keys...)
+ if err == nil {
+ break
+ }
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload()
+ }
+ node, err = c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(err, true) {
+ continue
+ }
+
+ return err
+ }
+
+ return err
+}
+
+func (c *ClusterClient) pubSub() *PubSub {
+ var node *clusterNode
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ if node != nil {
+ panic("node != nil")
+ }
+
+ var err error
+ if len(channels) > 0 {
+ slot := hashtag.Slot(channels[0])
+ node, err = c.slotMasterNode(ctx, slot)
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ cn, err := node.Client.newConn(context.TODO())
+ if err != nil {
+ node = nil
+
+ return nil, err
+ }
+
+ return cn, nil
+ },
+ closeConn: func(cn *pool.Conn) error {
+ err := node.Client.connPool.CloseConn(cn)
+ node = nil
+ return err
+ },
+ }
+ pubsub.init()
+
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
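+//
+// A minimal usage sketch (the channel name is illustrative):
+//
+//	sub := rdb.Subscribe(ctx, "events")
+//	defer sub.Close()
+//	msg, err := sub.ReceiveMessage(ctx)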
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ // Try 3 random nodes.
+ const nodeLimit = 3
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ perm := rand.Perm(len(addrs))
+ if len(perm) > nodeLimit {
+ perm = perm[:nodeLimit]
+ }
+
+ for _, idx := range perm {
+ addr := addrs[idx]
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ info, err := node.Client.Command(ctx).Result()
+ if err == nil {
+ return info, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if firstErr == nil {
+ panic("not reached")
+ }
+ return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
+ if err != nil {
+ internal.Logger.Printf(context.TODO(), "getting command info: %s", err)
+ return nil
+ }
+
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int {
+ args := cmd.Args()
+ if args[0] == "cluster" && args[1] == "getkeysinslot" {
+ return args[2].(int)
+ }
+
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(
+ ctx context.Context,
+ cmdName string,
+ slot int,
+) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.opt.ReadOnly {
+ cmdInfo := c.cmdInfo(ctx, cmdName)
+ if cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
+ }
+ return state.slotMasterNode(slot)
+}
+
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+ if c.opt.RouteByLatency {
+ return state.slotClosestNode(slot)
+ }
+ if c.opt.RouteRandomly {
+ return state.slotRandomNode(slot)
+ }
+ return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return state.slotMasterNode(slot)
+}
+
+// SlaveForKey returns a client for a replica node that serves the given key.
+// This is especially useful for running a Lua script that contains only
+// read-only commands on a replica: ordinary Redis commands carry a read-only
+// flag and are routed to replica nodes automatically when
+// ClusterOptions.ReadOnly is set to true, but Lua scripts are not.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ slot := hashtag.Slot(key)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
+// MasterForKey returns a client for the master node that serves the given key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+ slot := hashtag.Slot(key)
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
+func (c *ClusterClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+ for _, n := range nodes {
+ if n == node {
+ return nodes
+ }
+ }
+ return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+ mu sync.Mutex
+ m map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+ return &cmdsMap{
+ m: make(map[*clusterNode][]Cmder),
+ }
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+ m.mu.Lock()
+ m.m[node] = append(m.m[node], cmds...)
+ m.mu.Unlock()
+}
diff --git a/vendor/github.com/redis/go-redis/v9/osscluster_commands.go b/vendor/github.com/redis/go-redis/v9/osscluster_commands.go
new file mode 100644
index 0000000000..b13f8e7e9d
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/osscluster_commands.go
@@ -0,0 +1,109 @@
+package redis
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+)
+
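+// DBSize returns the total number of keys in the cluster, computed by running
+// DBSIZE on every master node and summing the results.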
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var size int64
+ err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+ n, err := master.DBSize(ctx).Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = size
+ }
+ return nil
+ })
+ return cmd
+}
+
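+// ScriptLoad loads the script onto every shard in the cluster and returns the
+// SHA1 digest reported by the first shard to return it.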
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptLoad(ctx, script).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ if cmd.Val() == "" {
+ cmd.val = val
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
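+// ScriptFlush removes all cached scripts from every shard in the cluster.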
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ return shard.ScriptFlush(ctx).Err()
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
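+// ScriptExists reports, for each hash, whether the script is cached on every
+// shard; a hash is reported as existing only if all shards have it.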
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+
+ result := make([]bool, len(hashes))
+ for i := range result {
+ result[i] = true
+ }
+
+ _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var mu sync.Mutex
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptExists(ctx, hashes...).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ for i, v := range val {
+ result[i] = result[i] && v
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = result
+ }
+ return nil
+ })
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/pipeline.go b/vendor/github.com/redis/go-redis/v9/pipeline.go
new file mode 100644
index 0000000000..1c114205c0
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/pipeline.go
@@ -0,0 +1,121 @@
+package redis
+
+import (
+ "context"
+ "errors"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism to realise the Redis Pipeline technique.
+//
+// Pipelining speeds up processing dramatically by packing operations into
+// batches, sending them to Redis at once, and reading all of the replies in
+// a single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that a Pipeline is not a transaction, so you can get unexpected
+// results with big pipelines and small read/write timeouts: the client
+// retries on timeout, so a pipeline can be retransmitted and its commands
+// executed more than once.
+// To avoid this, use read/write timeouts that are generous for your batch
+// size, and/or use TxPipeline.
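+//
+// A minimal usage sketch (rdb is an assumed client; the key name is
+// illustrative):
+//
+//	pipe := rdb.Pipeline()
+//	incr := pipe.Incr(ctx, "counter")
+//	pipe.Expire(ctx, "counter", time.Hour)
+//	_, err := pipe.Exec(ctx)
+//	fmt.Println(incr.Val(), err)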
+type Pipeliner interface {
+ StatefulCmdable
+
+	// Len returns the number of commands in the pipeline that have not yet
+	// been executed.
+	Len() int
+
+	// Do queues an arbitrary command for execution.
+	// If a certain Redis command is not yet supported, you can use Do to execute it.
+	Do(ctx context.Context, args ...interface{}) *Cmd
+
+	// Process places the command to be executed into the pipeline buffer.
+	Process(ctx context.Context, cmd Cmder) error
+
+	// Discard drops all buffered commands that have not yet been executed.
+	Discard()
+
+	// Exec sends all buffered commands to the Redis server in a single round trip.
+	Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining.
+// Please note: it is not safe for concurrent use by multiple goroutines.
+type Pipeline struct {
+ cmdable
+ statefulCmdable
+
+ exec pipelineExecer
+ cmds []Cmder
+}
+
+func (c *Pipeline) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+ return len(c.cmds)
+}
+
+// Do queues the custom command for later execution.
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ if len(args) == 0 {
+ cmd.SetErr(errors.New("redis: please enter the command to be executed"))
+ return cmd
+ }
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+ c.cmds = append(c.cmds, cmd)
+ return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() {
+ c.cmds = c.cmds[:0]
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands; the returned error, if any, is
+// that of the first failed command.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+ if len(c.cmds) == 0 {
+ return nil, nil
+ }
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ if err := fn(c); err != nil {
+ return nil, err
+ }
+ return c.Exec(ctx)
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+ return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+ return c
+}
diff --git a/vendor/github.com/redis/go-redis/v9/probabilistic.go b/vendor/github.com/redis/go-redis/v9/probabilistic.go
new file mode 100644
index 0000000000..5d5cd1a628
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/probabilistic.go
@@ -0,0 +1,1429 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type ProbabilisticCmdable interface {
+ BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFCard(ctx context.Context, key string) *IntCmd
+ BFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ BFInfo(ctx context.Context, key string) *BFInfoCmd
+ BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd
+ BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd
+ BFInfoSize(ctx context.Context, key string) *BFInfoCmd
+ BFInfoFilters(ctx context.Context, key string) *BFInfoCmd
+ BFInfoItems(ctx context.Context, key string) *BFInfoCmd
+ BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd
+ BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd
+ BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd
+ BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd
+ BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFCount(ctx context.Context, key string, element interface{}) *IntCmd
+ CFDel(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFExists(ctx context.Context, key string, element interface{}) *BoolCmd
+ CFInfo(ctx context.Context, key string) *CFInfoCmd
+ CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd
+ CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd
+ CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd
+ CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd
+ CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd
+ CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd
+ CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd
+ CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd
+ CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd
+
+ CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ CMSInfo(ctx context.Context, key string) *CMSInfoCmd
+	CMSInitByDim(ctx context.Context, key string, width, depth int64) *StatusCmd
+ CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd
+ CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd
+ CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd
+ CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+
+ TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd
+ TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd
+ TopKInfo(ctx context.Context, key string) *TopKInfoCmd
+ TopKList(ctx context.Context, key string) *StringSliceCmd
+ TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd
+ TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd
+ TopKReserve(ctx context.Context, key string, k int64) *StatusCmd
+ TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd
+
+ TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd
+ TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd
+ TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestCreate(ctx context.Context, key string) *StatusCmd
+ TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd
+ TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd
+ TDigestMax(ctx context.Context, key string) *FloatCmd
+ TDigestMin(ctx context.Context, key string) *FloatCmd
+ TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd
+ TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd
+ TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestReset(ctx context.Context, key string) *StatusCmd
+ TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd
+ TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd
+}
+
+type BFInsertOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+ NoCreate bool
+}
+
+type BFReserveOptions struct {
+ Capacity int64
+ Error float64
+ Expansion int64
+ NonScaling bool
+}
+
+type CFReserveOptions struct {
+ Capacity int64
+ BucketSize int64
+ MaxIterations int64
+ Expansion int64
+}
+
+type CFInsertOptions struct {
+ Capacity int64
+ NoCreate bool
+}
+
+// -------------------------------------------
+// Bloom filter commands
+// -------------------------------------------
+
+// BFReserve creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserve(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveExpansion creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying an expansion rate for the filter.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveExpansion(ctx context.Context, key string, errorRate float64, capacity, expansion int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveNonScaling creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying that the filter should not scale.
+// For more information - https://redis.io/commands/bf.reserve/
+func (c cmdable) BFReserveNonScaling(ctx context.Context, key string, errorRate float64, capacity int64) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key, errorRate, capacity, "NONSCALING"}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFReserveWithArgs creates an empty Bloom filter with a single sub-filter
+// for the initial specified capacity and with an upper bound error_rate.
+// This function also allows for specifying additional options such as expansion rate and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.reserve/
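+//
+// A minimal usage sketch (the key name is illustrative):
+//
+//	err := rdb.BFReserveWithArgs(ctx, "bf:users", &BFReserveOptions{
+//		Error:    0.001,
+//		Capacity: 10000,
+//	}).Err()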
+func (c cmdable) BFReserveWithArgs(ctx context.Context, key string, options *BFReserveOptions) *StatusCmd {
+ args := []interface{}{"BF.RESERVE", key}
+ if options != nil {
+ args = append(args, options.Error, options.Capacity)
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFAdd adds an item to a Bloom filter.
+// For more information - https://redis.io/commands/bf.add/
+func (c cmdable) BFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFCard returns the cardinality of a Bloom filter - the number of items that
+// were added to the Bloom filter and detected as unique (items that caused at
+// least one bit to be set in at least one sub-filter).
+// For more information - https://redis.io/commands/bf.card/
+func (c cmdable) BFCard(ctx context.Context, key string) *IntCmd {
+ args := []interface{}{"BF.CARD", key}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFExists determines whether a given item was added to a Bloom filter.
+// For more information - https://redis.io/commands/bf.exists/
+func (c cmdable) BFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"BF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFLoadChunk restores a Bloom filter previously saved using BF.SCANDUMP.
+// For more information - https://redis.io/commands/bf.loadchunk/
+func (c cmdable) BFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"BF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFScanDump begins an incremental save of the Bloom filter.
+// This command is useful for large Bloom filters that cannot fit into the DUMP and RESTORE model.
+// For more information - https://redis.io/commands/bf.scandump/
+func (c cmdable) BFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"BF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type ScanDump struct {
+ Iter int64
+ Data string
+}
+
+type ScanDumpCmd struct {
+ baseCmd
+
+ val ScanDump
+}
+
+func newScanDumpCmd(ctx context.Context, args ...interface{}) *ScanDumpCmd {
+ return &ScanDumpCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ScanDumpCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ScanDumpCmd) SetVal(val ScanDump) {
+ cmd.val = val
+}
+
+func (cmd *ScanDumpCmd) Result() (ScanDump, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ScanDumpCmd) Val() ScanDump {
+ return cmd.val
+}
+
+func (cmd *ScanDumpCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = ScanDump{}
+ for i := 0; i < n; i++ {
+ iter, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ data, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Data = data
+ cmd.val.Iter = iter
+	}
+
+ return nil
+}
+
+// BFInfo returns information about a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfo(ctx context.Context, key string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BFInfo struct {
+ Capacity int64
+ Size int64
+ Filters int64
+ ItemsInserted int64
+ ExpansionRate int64
+}
+
+type BFInfoCmd struct {
+ baseCmd
+
+ val BFInfo
+}
+
+func NewBFInfoCmd(ctx context.Context, args ...interface{}) *BFInfoCmd {
+ return &BFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BFInfoCmd) SetVal(val BFInfo) {
+ cmd.val = val
+}
+
+func (cmd *BFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BFInfoCmd) Val() BFInfo {
+ return cmd.val
+}
+
+func (cmd *BFInfoCmd) Result() (BFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result BFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of filters":
+ result.Filters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.ItemsInserted, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// BFInfoCapacity returns information about the capacity of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoCapacity(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "CAPACITY")
+}
+
+// BFInfoSize returns information about the size of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoSize(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "SIZE")
+}
+
+// BFInfoFilters returns information about the filters of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoFilters(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "FILTERS")
+}
+
+// BFInfoItems returns information about the items of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoItems(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "ITEMS")
+}
+
+// BFInfoExpansion returns information about the expansion rate of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoExpansion(ctx context.Context, key string) *BFInfoCmd {
+ return c.BFInfoArg(ctx, key, "EXPANSION")
+}
+
+// BFInfoArg returns information about a specific option of a Bloom filter.
+// For more information - https://redis.io/commands/bf.info/
+func (c cmdable) BFInfoArg(ctx context.Context, key, option string) *BFInfoCmd {
+ args := []interface{}{"BF.INFO", key, option}
+ cmd := NewBFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFInsert inserts elements into a Bloom filter.
+// This function also allows for specifying additional options such as:
+// capacity, error rate, expansion rate, and non-scaling behavior.
+// For more information - https://redis.io/commands/bf.insert/
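+//
+// A minimal usage sketch (key and items are illustrative):
+//
+//	added, err := rdb.BFInsert(ctx, "bf:users", &BFInsertOptions{
+//		Capacity: 10000,
+//		Error:    0.001,
+//	}, "alice", "bob").Result()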
+func (c cmdable) BFInsert(ctx context.Context, key string, options *BFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.INSERT", key}
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.Error != 0 {
+ args = append(args, "ERROR", options.Error)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ if options.NonScaling {
+ args = append(args, "NONSCALING")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMAdd adds multiple elements to a Bloom filter.
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/bf.madd/
+func (c cmdable) BFMAdd(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MADD", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BFMExists checks whether multiple elements exist in a Bloom filter.
+// It returns an array of booleans indicating whether each element exists in the filter.
+// For more information - https://redis.io/commands/bf.mexists/
+func (c cmdable) BFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"BF.MEXISTS", key}
+ args = append(args, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// Cuckoo filter commands
+// -------------------------------------------
+
+// CFReserve creates an empty Cuckoo filter with the specified capacity.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserve(ctx context.Context, key string, capacity int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveExpansion creates an empty Cuckoo filter with the specified capacity and expansion rate.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveExpansion(ctx context.Context, key string, capacity int64, expansion int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "EXPANSION", expansion}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveBucketSize creates an empty Cuckoo filter with the specified capacity and bucket size.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveBucketSize(ctx context.Context, key string, capacity int64, bucketsize int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "BUCKETSIZE", bucketsize}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveMaxIterations creates an empty Cuckoo filter with the specified capacity and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
+func (c cmdable) CFReserveMaxIterations(ctx context.Context, key string, capacity int64, maxiterations int64) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, capacity, "MAXITERATIONS", maxiterations}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFReserveWithArgs creates an empty Cuckoo filter with the specified options.
+// This function allows for specifying additional options such as bucket size and maximum number of iterations.
+// For more information - https://redis.io/commands/cf.reserve/
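+//
+// A minimal usage sketch (the key name is illustrative):
+//
+//	err := rdb.CFReserveWithArgs(ctx, "cf:users", &CFReserveOptions{
+//		Capacity:   10000,
+//		BucketSize: 4,
+//	}).Err()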
+func (c cmdable) CFReserveWithArgs(ctx context.Context, key string, options *CFReserveOptions) *StatusCmd {
+ args := []interface{}{"CF.RESERVE", key, options.Capacity}
+ if options.BucketSize != 0 {
+ args = append(args, "BUCKETSIZE", options.BucketSize)
+ }
+ if options.MaxIterations != 0 {
+ args = append(args, "MAXITERATIONS", options.MaxIterations)
+ }
+ if options.Expansion != 0 {
+ args = append(args, "EXPANSION", options.Expansion)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAdd adds an element to a Cuckoo filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.add/
+func (c cmdable) CFAdd(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADD", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFAddNX adds an element to a Cuckoo filter only if it does not already exist in the filter.
+// Returns true if the element was added to the filter or false if it already exists in the filter.
+// For more information - https://redis.io/commands/cf.addnx/
+func (c cmdable) CFAddNX(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.ADDNX", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFCount returns an estimate of the number of times an element may be in a Cuckoo Filter.
+// For more information - https://redis.io/commands/cf.count/
+func (c cmdable) CFCount(ctx context.Context, key string, element interface{}) *IntCmd {
+ args := []interface{}{"CF.COUNT", key, element}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFDel deletes an item once from the cuckoo filter.
+// For more information - https://redis.io/commands/cf.del/
+func (c cmdable) CFDel(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.DEL", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFExists determines whether an item may exist in the Cuckoo Filter or not.
+// For more information - https://redis.io/commands/cf.exists/
+func (c cmdable) CFExists(ctx context.Context, key string, element interface{}) *BoolCmd {
+ args := []interface{}{"CF.EXISTS", key, element}
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFLoadChunk restores a filter previously saved using SCANDUMP.
+// For more information - https://redis.io/commands/cf.loadchunk/
+func (c cmdable) CFLoadChunk(ctx context.Context, key string, iterator int64, data interface{}) *StatusCmd {
+ args := []interface{}{"CF.LOADCHUNK", key, iterator, data}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFScanDump begins an incremental save of the cuckoo filter.
+// For more information - https://redis.io/commands/cf.scandump/
+func (c cmdable) CFScanDump(ctx context.Context, key string, iterator int64) *ScanDumpCmd {
+ args := []interface{}{"CF.SCANDUMP", key, iterator}
+ cmd := newScanDumpCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CFInfo struct {
+ Size int64
+ NumBuckets int64
+ NumFilters int64
+ NumItemsInserted int64
+ NumItemsDeleted int64
+ BucketSize int64
+ ExpansionRate int64
+ MaxIteration int64
+}
+
+type CFInfoCmd struct {
+ baseCmd
+
+ val CFInfo
+}
+
+func NewCFInfoCmd(ctx context.Context, args ...interface{}) *CFInfoCmd {
+ return &CFInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CFInfoCmd) SetVal(val CFInfo) {
+ cmd.val = val
+}
+
+func (cmd *CFInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CFInfoCmd) Val() CFInfo {
+ return cmd.val
+}
+
+func (cmd *CFInfoCmd) Result() (CFInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CFInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CFInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Size":
+ result.Size, err = rd.ReadInt()
+ case "Number of buckets":
+ result.NumBuckets, err = rd.ReadInt()
+ case "Number of filters":
+ result.NumFilters, err = rd.ReadInt()
+ case "Number of items inserted":
+ result.NumItemsInserted, err = rd.ReadInt()
+ case "Number of items deleted":
+ result.NumItemsDeleted, err = rd.ReadInt()
+ case "Bucket size":
+ result.BucketSize, err = rd.ReadInt()
+ case "Expansion rate":
+ result.ExpansionRate, err = rd.ReadInt()
+ case "Max iterations":
+ result.MaxIteration, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: CF.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CFInfo returns information about a Cuckoo filter.
+// For more information - https://redis.io/commands/cf.info/
+func (c cmdable) CFInfo(ctx context.Context, key string) *CFInfoCmd {
+ args := []interface{}{"CF.INFO", key}
+ cmd := NewCFInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsert inserts elements into a Cuckoo filter.
+// This function also allows for specifying additional options such as capacity
+// and whether to skip creating the filter if it does not already exist (NOCREATE).
+// Returns an array of booleans indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insert/
+func (c cmdable) CFInsert(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.INSERT", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CFInsertNX inserts elements into a Cuckoo filter only if they do not already exist in the filter.
+// This function also allows for specifying additional options such as capacity
+// and whether to skip creating the filter if it does not already exist (NOCREATE).
+// Returns an array of integers indicating whether each element was added to the filter or not.
+// For more information - https://redis.io/commands/cf.insertnx/
+func (c cmdable) CFInsertNX(ctx context.Context, key string, options *CFInsertOptions, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CF.INSERTNX", key}
+ args = c.getCfInsertWithArgs(args, options, elements...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) getCfInsertWithArgs(args []interface{}, options *CFInsertOptions, elements ...interface{}) []interface{} {
+ if options != nil {
+ if options.Capacity != 0 {
+ args = append(args, "CAPACITY", options.Capacity)
+ }
+ if options.NoCreate {
+ args = append(args, "NOCREATE")
+ }
+ }
+ args = append(args, "ITEMS")
+ args = append(args, elements...)
+
+ return args
+}
+
+// CFMExists checks whether multiple elements exist in a Cuckoo filter.
+// It returns an array of booleans indicating whether each element exists in the filter.
+// For more information - https://redis.io/commands/cf.mexists/
+func (c cmdable) CFMExists(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := []interface{}{"CF.MEXISTS", key}
+ args = append(args, elements...)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// CMS commands
+// -------------------------------------------
+
+// CMSIncrBy increments the count of one or more items in a Count-Min Sketch filter.
+// Returns an array of integers representing the updated count of each item.
+// For more information - https://redis.io/commands/cms.incrby/
+func (c cmdable) CMSIncrBy(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "CMS.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type CMSInfo struct {
+ Width int64
+ Depth int64
+ Count int64
+}
+
+type CMSInfoCmd struct {
+ baseCmd
+
+ val CMSInfo
+}
+
+func NewCMSInfoCmd(ctx context.Context, args ...interface{}) *CMSInfoCmd {
+ return &CMSInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CMSInfoCmd) SetVal(val CMSInfo) {
+ cmd.val = val
+}
+
+func (cmd *CMSInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CMSInfoCmd) Val() CMSInfo {
+ return cmd.val
+}
+
+func (cmd *CMSInfoCmd) Result() (CMSInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *CMSInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result CMSInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "count":
+ result.Count, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: CMS.INFO unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// CMSInfo returns information about a Count-Min Sketch filter.
+// For more information - https://redis.io/commands/cms.info/
+func (c cmdable) CMSInfo(ctx context.Context, key string) *CMSInfoCmd {
+ args := []interface{}{"CMS.INFO", key}
+ cmd := NewCMSInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByDim creates an empty Count-Min Sketch filter with the specified dimensions.
+// For more information - https://redis.io/commands/cms.initbydim/
+func (c cmdable) CMSInitByDim(ctx context.Context, key string, width, depth int64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYDIM", key, width, depth}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSInitByProb creates an empty Count-Min Sketch filter with the specified error rate and probability.
+// For more information - https://redis.io/commands/cms.initbyprob/
+func (c cmdable) CMSInitByProb(ctx context.Context, key string, errorRate, probability float64) *StatusCmd {
+ args := []interface{}{"CMS.INITBYPROB", key, errorRate, probability}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMerge merges multiple Count-Min Sketch filters into a single filter.
+// The destination filter must already be initialized, and every filter
+// involved must have identical width and depth.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
+func (c cmdable) CMSMerge(ctx context.Context, destKey string, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"CMS.MERGE", destKey, len(sourceKeys)}
+ for _, s := range sourceKeys {
+ args = append(args, s)
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSMergeWithWeight merges multiple Count-Min Sketch filters into a single
+// filter, multiplying each source filter's counts by its given weight.
+// The destination filter must already be initialized, and every filter
+// involved must have identical width and depth.
+// Returns OK on success or an error if the filters could not be merged.
+// For more information - https://redis.io/commands/cms.merge/
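+//
+// A minimal usage sketch (key names are illustrative; the destination sketch
+// is assumed to be initialized):
+//
+//	err := rdb.CMSMergeWithWeight(ctx, "cms:total", map[string]int64{
+//		"cms:day1": 1,
+//		"cms:day2": 2,
+//	}).Err()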
+func (c cmdable) CMSMergeWithWeight(ctx context.Context, destKey string, sourceKeys map[string]int64) *StatusCmd {
+ args := make([]interface{}, 0, 4+(len(sourceKeys)*2+1))
+ args = append(args, "CMS.MERGE", destKey, len(sourceKeys))
+
+ if len(sourceKeys) > 0 {
+ sk := make([]interface{}, len(sourceKeys))
+ sw := make([]interface{}, len(sourceKeys))
+
+ i := 0
+ for k, w := range sourceKeys {
+ sk[i] = k
+ sw[i] = w
+ i++
+ }
+
+ args = append(args, sk...)
+ args = append(args, "WEIGHTS")
+ args = append(args, sw...)
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// CMSQuery returns count for item(s).
+// For more information - https://redis.io/commands/cms.query/
+func (c cmdable) CMSQuery(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := []interface{}{"CMS.QUERY", key}
+ args = append(args, elements...)
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// -------------------------------------------
+// TopK commands
+// -------------------------------------------
+
+// TopKAdd adds one or more elements to a Top-K filter.
+// Returns an array of strings representing the items that were removed from the filter, if any.
+// For more information - https://redis.io/commands/topk.add/
+func (c cmdable) TopKAdd(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.ADD"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserve creates an empty Top-K filter with the specified number of top items to keep.
+// For more information - https://redis.io/commands/topk.reserve/
+func (c cmdable) TopKReserve(ctx context.Context, key string, k int64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKReserveWithOptions creates an empty Top-K filter with the specified
+// number of top items to keep, additionally specifying its width, depth, and decay rate.
+// For more information - https://redis.io/commands/topk.reserve/
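+//
+// A minimal usage sketch (the key name is illustrative):
+//
+//	err := rdb.TopKReserveWithOptions(ctx, "topk:pages", 10, 8, 7, 0.9).Err()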
+func (c cmdable) TopKReserveWithOptions(ctx context.Context, key string, k int64, width, depth int64, decay float64) *StatusCmd {
+ args := []interface{}{"TOPK.RESERVE", key, k, width, depth, decay}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TopKInfo struct {
+ K int64
+ Width int64
+ Depth int64
+ Decay float64
+}
+
+type TopKInfoCmd struct {
+ baseCmd
+
+ val TopKInfo
+}
+
+func NewTopKInfoCmd(ctx context.Context, args ...interface{}) *TopKInfoCmd {
+ return &TopKInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TopKInfoCmd) SetVal(val TopKInfo) {
+ cmd.val = val
+}
+
+func (cmd *TopKInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TopKInfoCmd) Val() TopKInfo {
+ return cmd.val
+}
+
+func (cmd *TopKInfoCmd) Result() (TopKInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TopKInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TopKInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "k":
+ result.K, err = rd.ReadInt()
+ case "width":
+ result.Width, err = rd.ReadInt()
+ case "depth":
+ result.Depth, err = rd.ReadInt()
+ case "decay":
+ result.Decay, err = rd.ReadFloat()
+ default:
+ return fmt.Errorf("redis: topk.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TopKInfo returns information about a Top-K filter.
+// For more information - https://redis.io/commands/topk.info/
+func (c cmdable) TopKInfo(ctx context.Context, key string) *TopKInfoCmd {
+ args := []interface{}{"TOPK.INFO", key}
+
+ cmd := NewTopKInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKQuery checks whether multiple elements exist in a Top-K filter.
+// It returns an array of booleans indicating whether each element exists in the filter.
+// For more information - https://redis.io/commands/topk.query/
+func (c cmdable) TopKQuery(ctx context.Context, key string, elements ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.QUERY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKCount returns an estimate of the number of times an item may be in a Top-K filter.
+// For more information - https://redis.io/commands/topk.count/
+func (c cmdable) TopKCount(ctx context.Context, key string, elements ...interface{}) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.COUNT"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKIncrBy increases the count of one or more items in a Top-K filter.
+// For more information - https://redis.io/commands/topk.incrby/
+func (c cmdable) TopKIncrBy(ctx context.Context, key string, elements ...interface{}) *StringSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TOPK.INCRBY"
+ args[1] = key
+ args = appendArgs(args, elements)
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKList returns all items in the Top-K list.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKList(ctx context.Context, key string) *StringSliceCmd {
+ args := []interface{}{"TOPK.LIST", key}
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TopKListWithCount returns all items in Top-K list with their respective count.
+// For more information - https://redis.io/commands/topk.list/
+func (c cmdable) TopKListWithCount(ctx context.Context, key string) *MapStringIntCmd {
+ args := []interface{}{"TOPK.LIST", key, "WITHCOUNT"}
+
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
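+
+// Example (an illustrative sketch; key and item names are made up):
+//
+//	// TOPK.INCRBY takes item/increment pairs.
+//	_ = rdb.TopKIncrBy(ctx, "topk:queries", "foo", 3, "bar", 1)
+//	exists, _ := rdb.TopKQuery(ctx, "topk:queries", "foo", "baz").Result() // e.g. [true false]
+//	counts, _ := rdb.TopKListWithCount(ctx, "topk:queries").Result()       // item -> approximate count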
+
+// -------------------------------------------
+// t-digest commands
+// -------------------------------------------
+
+// TDigestAdd adds one or more elements to a t-Digest data structure.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.add/
+func (c cmdable) TDigestAdd(ctx context.Context, key string, elements ...float64) *StatusCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.ADD"
+ args[1] = key
+
+	// Convert the []float64 elements to []interface{}.
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRank returns an array of values from a t-Digest data structure based on their rank.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrank/
+func (c cmdable) TDigestByRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYRANK"
+ args[1] = key
+
+	// Convert the []uint64 ranks to []interface{}.
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestByRevRank returns an array of values from a t-Digest data structure based on their reverse rank.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of floats representing the values at the specified ranks or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.byrevrank/
+func (c cmdable) TDigestByRevRank(ctx context.Context, key string, rank ...uint64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(rank))
+ args[0] = "TDIGEST.BYREVRANK"
+ args[1] = key
+
+	// Convert the []uint64 ranks to []interface{}.
+ interfaceSlice := make([]interface{}, len(rank))
+ for i, v := range rank {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCDF returns an array of cumulative distribution function (CDF) values for one or more elements in a t-Digest data structure.
+// The CDF value for an element is the fraction of all elements in the t-Digest that are less than or equal to it.
+// Returns an array of floats representing the CDF values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.cdf/
+func (c cmdable) TDigestCDF(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.CDF"
+ args[1] = key
+
+	// Convert the []float64 elements to []interface{}.
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreate creates an empty t-Digest data structure with default parameters.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestCreateWithCompression creates an empty t-Digest data structure with a specified compression parameter.
+// The compression parameter controls the accuracy and memory usage of the t-Digest.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.create/
+func (c cmdable) TDigestCreateWithCompression(ctx context.Context, key string, compression int64) *StatusCmd {
+ args := []interface{}{"TDIGEST.CREATE", key, "COMPRESSION", compression}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
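+
+// Example (an illustrative sketch; the key name is made up):
+//
+//	// COMPRESSION trades accuracy for memory; 100 is the server default.
+//	if err := rdb.TDigestCreateWithCompression(ctx, "td:latency", 100).Err(); err != nil {
+//		// handle error
+//	}
+//	_ = rdb.TDigestAdd(ctx, "td:latency", 12.5, 9.1, 15.2)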
+
+type TDigestInfo struct {
+ Compression int64
+ Capacity int64
+ MergedNodes int64
+ UnmergedNodes int64
+ MergedWeight int64
+ UnmergedWeight int64
+ Observations int64
+ TotalCompressions int64
+ MemoryUsage int64
+}
+
+type TDigestInfoCmd struct {
+ baseCmd
+
+ val TDigestInfo
+}
+
+func NewTDigestInfoCmd(ctx context.Context, args ...interface{}) *TDigestInfoCmd {
+ return &TDigestInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TDigestInfoCmd) SetVal(val TDigestInfo) {
+ cmd.val = val
+}
+
+func (cmd *TDigestInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TDigestInfoCmd) Val() TDigestInfo {
+ return cmd.val
+}
+
+func (cmd *TDigestInfoCmd) Result() (TDigestInfo, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TDigestInfoCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+
+ var key string
+ var result TDigestInfo
+ for f := 0; f < n; f++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "Compression":
+ result.Compression, err = rd.ReadInt()
+ case "Capacity":
+ result.Capacity, err = rd.ReadInt()
+ case "Merged nodes":
+ result.MergedNodes, err = rd.ReadInt()
+ case "Unmerged nodes":
+ result.UnmergedNodes, err = rd.ReadInt()
+ case "Merged weight":
+ result.MergedWeight, err = rd.ReadInt()
+ case "Unmerged weight":
+ result.UnmergedWeight, err = rd.ReadInt()
+ case "Observations":
+ result.Observations, err = rd.ReadInt()
+ case "Total compressions":
+ result.TotalCompressions, err = rd.ReadInt()
+ case "Memory usage":
+ result.MemoryUsage, err = rd.ReadInt()
+ default:
+ return fmt.Errorf("redis: tdigest.info unexpected key %s", key)
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val = result
+ return nil
+}
+
+// TDigestInfo returns information about a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.info/
+func (c cmdable) TDigestInfo(ctx context.Context, key string) *TDigestInfoCmd {
+ args := []interface{}{"TDIGEST.INFO", key}
+
+ cmd := NewTDigestInfoCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestMax returns the maximum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.max/
+func (c cmdable) TDigestMax(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MAX", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TDigestMergeOptions struct {
+ Compression int64
+ Override bool
+}
+
+// TDigestMerge merges multiple t-Digest data structures into a single t-Digest.
+// This function also allows for specifying additional options such as compression and override behavior.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.merge/
+func (c cmdable) TDigestMerge(ctx context.Context, destKey string, options *TDigestMergeOptions, sourceKeys ...string) *StatusCmd {
+ args := []interface{}{"TDIGEST.MERGE", destKey, len(sourceKeys)}
+
+ for _, sourceKey := range sourceKeys {
+ args = append(args, sourceKey)
+ }
+
+ if options != nil {
+ if options.Compression != 0 {
+ args = append(args, "COMPRESSION", options.Compression)
+ }
+ if options.Override {
+ args = append(args, "OVERRIDE")
+ }
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
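+
+// Example (an illustrative sketch; key names are made up):
+//
+//	// Note: a zero Compression is treated as unset and omitted from the command.
+//	opts := &TDigestMergeOptions{Compression: 1000, Override: true}
+//	err := rdb.TDigestMerge(ctx, "td:all", opts, "td:shard1", "td:shard2").Err()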
+
+// TDigestMin returns the minimum value from a t-Digest data structure.
+// For more information - https://redis.io/commands/tdigest.min/
+func (c cmdable) TDigestMin(ctx context.Context, key string) *FloatCmd {
+ args := []interface{}{"TDIGEST.MIN", key}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestQuantile returns value estimates for one or more requested quantiles in a t-Digest data structure.
+// For each requested quantile (a fraction between 0 and 1), it returns an estimate of the value that is
+// smaller than that fraction of the observations in the t-Digest.
+// Returns an array of floats representing the value estimates or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.quantile/
+func (c cmdable) TDigestQuantile(ctx context.Context, key string, elements ...float64) *FloatSliceCmd {
+ args := make([]interface{}, 2, 2+len(elements))
+ args[0] = "TDIGEST.QUANTILE"
+ args[1] = key
+
+	// Convert the []float64 quantiles to []interface{}.
+ interfaceSlice := make([]interface{}, len(elements))
+ for i, v := range elements {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRank returns an array of rank values for one or more elements in a t-Digest data structure.
+// The rank of an element is its position in the sorted list of all elements in the t-Digest.
+// Returns an array of integers representing the rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.rank/
+func (c cmdable) TDigestRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.RANK"
+ args[1] = key
+
+	// Convert the []float64 values to []interface{}.
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestReset resets a t-Digest data structure to its initial state.
+// Returns OK on success or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.reset/
+func (c cmdable) TDigestReset(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TDIGEST.RESET", key}
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestRevRank returns an array of reverse rank values for one or more elements in a t-Digest data structure.
+// The reverse rank of an element is its position in the sorted list of all elements in the t-Digest when sorted in descending order.
+// Returns an array of integers representing the reverse rank values for each element or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.revrank/
+func (c cmdable) TDigestRevRank(ctx context.Context, key string, values ...float64) *IntSliceCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "TDIGEST.REVRANK"
+ args[1] = key
+
+	// Convert the []float64 values to []interface{}.
+ interfaceSlice := make([]interface{}, len(values))
+ for i, v := range values {
+ interfaceSlice[i] = v
+ }
+
+ args = append(args, interfaceSlice...)
+
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TDigestTrimmedMean returns the trimmed mean value from a t-Digest data structure.
+// The trimmed mean is calculated by removing a specified fraction of the highest and lowest values from the t-Digest and then calculating the mean of the remaining values.
+// Returns a float representing the trimmed mean value or an error if the operation could not be completed.
+// For more information - https://redis.io/commands/tdigest.trimmed_mean/
+func (c cmdable) TDigestTrimmedMean(ctx context.Context, key string, lowCutQuantile, highCutQuantile float64) *FloatCmd {
+ args := []interface{}{"TDIGEST.TRIMMED_MEAN", key, lowCutQuantile, highCutQuantile}
+
+ cmd := NewFloatCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
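+
+// Example (an illustrative sketch; assumes observations were added with TDigestAdd):
+//
+//	// Median and p99 estimates.
+//	qs, _ := rdb.TDigestQuantile(ctx, "td:latency", 0.5, 0.99).Result()
+//	// Mean of the middle 90% of observations.
+//	m, _ := rdb.TDigestTrimmedMean(ctx, "td:latency", 0.05, 0.95).Result()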
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/redis/go-redis/v9/pubsub.go
similarity index 86%
rename from vendor/github.com/go-redis/redis/v8/pubsub.go
rename to vendor/github.com/redis/go-redis/v9/pubsub.go
index efc2354af0..5df537c422 100644
--- a/vendor/github.com/go-redis/redis/v8/pubsub.go
+++ b/vendor/github.com/redis/go-redis/v9/pubsub.go
@@ -7,9 +7,9 @@ import (
"sync"
"time"
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
)
// PubSub implements Pub/Sub commands as described in
@@ -24,10 +24,11 @@ type PubSub struct {
newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
closeConn func(*pool.Conn) error
- mu sync.Mutex
- cn *pool.Conn
- channels map[string]struct{}
- patterns map[string]struct{}
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+ schannels map[string]struct{}
closed bool
exit chan struct{}
@@ -46,6 +47,7 @@ func (c *PubSub) init() {
func (c *PubSub) String() string {
channels := mapKeys(c.channels)
channels = append(channels, mapKeys(c.patterns)...)
+ channels = append(channels, mapKeys(c.schannels)...)
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}
@@ -82,7 +84,7 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er
}
func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
- return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return cn.WithWriter(context.Background(), c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
}
@@ -101,6 +103,13 @@ func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
}
}
+ if len(c.schannels) > 0 {
+ err := c._subscribe(ctx, cn, "ssubscribe", mapKeys(c.schannels))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
return firstErr
}
@@ -208,15 +217,38 @@ func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
return err
}
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *PubSub) SSubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "ssubscribe", channels...)
+ if c.schannels == nil {
+ c.schannels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.schannels[s] = struct{}{}
+ }
+ return err
+}
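+
+// Example (an illustrative sketch; the channel name is a placeholder, and rdb is
+// assumed to be a *Client whose SSubscribe method returns a *PubSub):
+//
+//	sub := rdb.SSubscribe(ctx, "shard-ch")
+//	defer sub.Close()
+//	for msg := range sub.Channel() {
+//		_ = msg.Payload // "smessage" payloads are delivered as *Message
+//	}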
+
// Unsubscribe the client from the given channels, or from all of
// them if none is given.
func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
- for _, channel := range channels {
- delete(c.channels, channel)
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.channels {
+ delete(c.channels, channel)
+ }
}
+
err := c.subscribe(ctx, "unsubscribe", channels...)
return err
}
@@ -227,13 +259,42 @@ func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
- for _, pattern := range patterns {
- delete(c.patterns, pattern)
+ if len(patterns) > 0 {
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ } else {
+ // Unsubscribe from all patterns.
+ for pattern := range c.patterns {
+ delete(c.patterns, pattern)
+ }
}
+
err := c.subscribe(ctx, "punsubscribe", patterns...)
return err
}
+// SUnsubscribe unsubscribes the client from the given shard channels,
+// or from all of them if none is given.
+func (c *PubSub) SUnsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if len(channels) > 0 {
+ for _, channel := range channels {
+ delete(c.schannels, channel)
+ }
+ } else {
+ // Unsubscribe from all channels.
+ for channel := range c.schannels {
+ delete(c.schannels, channel)
+ }
+ }
+
+ err := c.subscribe(ctx, "sunsubscribe", channels...)
+ return err
+}
+
func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
cn, err := c.conn(ctx, channels)
if err != nil {
@@ -311,7 +372,7 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
}, nil
case []interface{}:
switch kind := reply[0].(string); kind {
- case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe", "ssubscribe", "sunsubscribe":
// Can be nil in case of "unsubscribe".
channel, _ := reply[1].(string)
return &Subscription{
@@ -319,7 +380,7 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
Channel: channel,
Count: int(reply[2].(int64)),
}, nil
- case "message":
+ case "message", "smessage":
switch payload := reply[2].(type) {
case string:
return &Message{
@@ -371,7 +432,7 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int
return nil, err
}
- err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+ err = cn.WithReader(context.Background(), timeout, func(rd *proto.Reader) error {
return c.cmd.readReply(rd)
})
@@ -426,11 +487,11 @@ func (c *PubSub) getContext() context.Context {
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
-// is blocked full for 30 seconds the message is dropped.
+// stays full for 1 minute, the message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
-// and re-subscribes if ping can not not received for 30 seconds.
+// and re-subscribes if a ping is not received for 1 minute.
func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
c.chOnce.Do(func() {
c.msgCh = newChannel(c, opts...)
@@ -456,9 +517,9 @@ func (c *PubSub) ChannelSize(size int) <-chan *Message {
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+func (c *PubSub) ChannelWithSubscriptions(opts ...ChannelOption) <-chan interface{} {
c.chOnce.Do(func() {
- c.allCh = newChannel(c, WithChannelSize(size))
+ c.allCh = newChannel(c, opts...)
c.allCh.initAllChan()
})
if c.allCh == nil {
diff --git a/vendor/github.com/redis/go-redis/v9/pubsub_commands.go b/vendor/github.com/redis/go-redis/v9/pubsub_commands.go
new file mode 100644
index 0000000000..28622aa6bc
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/pubsub_commands.go
@@ -0,0 +1,76 @@
+package redis
+
+import "context"
+
+type PubSubCmdable interface {
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ SPublish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+ PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd
+}
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
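+// SPublish posts the message to the given shard channel.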
+func (c cmdable) SPublish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "spublish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "shardchannels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "shardnumsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewMapStringIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
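+
+// Example (an illustrative sketch; channel names are made up):
+//
+//	chans, _ := rdb.PubSubChannels(ctx, "news.*").Result()
+//	subs, _ := rdb.PubSubNumSub(ctx, "news.tech").Result() // channel -> subscriber count
+//	npat, _ := rdb.PubSubNumPat(ctx).Result()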
diff --git a/vendor/github.com/redis/go-redis/v9/redis.go b/vendor/github.com/redis/go-redis/v9/redis.go
new file mode 100644
index 0000000000..d25a0d3142
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/redis.go
@@ -0,0 +1,852 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hscan"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+// Scanner is the internal/hscan.Scanner interface, exposed for external use.
+type Scanner = hscan.Scanner
+
+// Nil is the reply returned by Redis when a key does not exist.
+const Nil = proto.Nil
+
+// SetLogger sets a custom logger.
+func SetLogger(logger internal.Logging) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ DialHook(next DialHook) DialHook
+ ProcessHook(next ProcessHook) ProcessHook
+ ProcessPipelineHook(next ProcessPipelineHook) ProcessPipelineHook
+}
+
+type (
+ DialHook func(ctx context.Context, network, addr string) (net.Conn, error)
+ ProcessHook func(ctx context.Context, cmd Cmder) error
+ ProcessPipelineHook func(ctx context.Context, cmds []Cmder) error
+)
+
+type hooksMixin struct {
+ hooksMu *sync.Mutex
+
+ slice []Hook
+ initial hooks
+ current hooks
+}
+
+func (hs *hooksMixin) initHooks(hooks hooks) {
+ hs.hooksMu = new(sync.Mutex)
+ hs.initial = hooks
+ hs.chain()
+}
+
+type hooks struct {
+ dial DialHook
+ process ProcessHook
+ pipeline ProcessPipelineHook
+ txPipeline ProcessPipelineHook
+}
+
+func (h *hooks) setDefaults() {
+ if h.dial == nil {
+ h.dial = func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, nil }
+ }
+ if h.process == nil {
+ h.process = func(ctx context.Context, cmd Cmder) error { return nil }
+ }
+ if h.pipeline == nil {
+ h.pipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+ if h.txPipeline == nil {
+ h.txPipeline = func(ctx context.Context, cmds []Cmder) error { return nil }
+ }
+}
+
+// AddHook appends a hook to the hook chain.
+// Hooks are functions executed during network connection, command execution, and
+// pipelining; they run in first-in-first-out (FIFO) order.
+// Each hook must call the next hook, unless it intends to terminate execution of the command.
+// For example, you added hook-1, hook-2:
+//
+// client.AddHook(hook-1, hook-2)
+//
+// hook-1:
+//
+// func (Hook1) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd Cmder) error {
+// print("hook-1 start")
+// next(ctx, cmd)
+// print("hook-1 end")
+// return nil
+// }
+// }
+//
+// hook-2:
+//
+// func (Hook2) ProcessHook(next redis.ProcessHook) redis.ProcessHook {
+// return func(ctx context.Context, cmd redis.Cmder) error {
+// print("hook-2 start")
+// next(ctx, cmd)
+// print("hook-2 end")
+// return nil
+// }
+// }
+//
+// The execution sequence is:
+//
+// hook-1 start -> hook-2 start -> exec redis cmd -> hook-2 end -> hook-1 end
+//
+// Please note: calling "next(ctx, cmd)" is essential, as it invokes the next hook in the chain;
+// if "next(ctx, cmd)" is not executed, the redis command will not be executed.
+func (hs *hooksMixin) AddHook(hook Hook) {
+ hs.slice = append(hs.slice, hook)
+ hs.chain()
+}
+
+func (hs *hooksMixin) chain() {
+ hs.initial.setDefaults()
+
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ hs.current.dial = hs.initial.dial
+ hs.current.process = hs.initial.process
+ hs.current.pipeline = hs.initial.pipeline
+ hs.current.txPipeline = hs.initial.txPipeline
+
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].DialHook(hs.current.dial); wrapped != nil {
+ hs.current.dial = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessHook(hs.current.process); wrapped != nil {
+ hs.current.process = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.pipeline); wrapped != nil {
+ hs.current.pipeline = wrapped
+ }
+ if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.txPipeline); wrapped != nil {
+ hs.current.txPipeline = wrapped
+ }
+ }
+}
+
+func (hs *hooksMixin) clone() hooksMixin {
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+
+ clone := *hs
+ l := len(clone.slice)
+ clone.slice = clone.slice[:l:l]
+ clone.hooksMu = new(sync.Mutex)
+ return clone
+}
+
+func (hs *hooksMixin) withProcessHook(ctx context.Context, cmd Cmder, hook ProcessHook) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmd)
+}
+
+func (hs *hooksMixin) withProcessPipelineHook(
+ ctx context.Context, cmds []Cmder, hook ProcessPipelineHook,
+) error {
+ for i := len(hs.slice) - 1; i >= 0; i-- {
+ if wrapped := hs.slice[i].ProcessPipelineHook(hook); wrapped != nil {
+ hook = wrapped
+ }
+ }
+ return hook(ctx, cmds)
+}
+
+func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) {
+ hs.hooksMu.Lock()
+ defer hs.hooksMu.Unlock()
+ return hs.current.dial(ctx, network, addr)
+}
+
+func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error {
+ return hs.current.process(ctx, cmd)
+}
+
+func (hs *hooksMixin) processPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.pipeline(ctx, cmds)
+}
+
+func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) error {
+ return hs.current.txPipeline(ctx, cmds)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+
+ onClose func() error // hook called when client is closed
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cn.Inited {
+ return cn, nil
+ }
+
+ if err := c.initConn(ctx, cn); err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := errors.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+ cn.Inited = true
+
+ username, password := c.opt.Username, c.opt.Password
+ if c.opt.CredentialsProvider != nil {
+ username, password = c.opt.CredentialsProvider()
+ }
+
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
+ conn := newConn(c.opt, connPool)
+
+ var auth bool
+ protocol := c.opt.Protocol
+	// By default, the current version uses RESP3.
+ if protocol < 2 {
+ protocol = 3
+ }
+
+	// For redis-server versions that do not support the HELLO command,
+ // RESP2 will continue to be used.
+ if err := conn.Hello(ctx, protocol, username, password, "").Err(); err == nil {
+ auth = true
+ } else if !isRedisError(err) {
+		// When the server responds over the RESP protocol with something other than
+		// a normal HELLO reply, we take it as an indication that the server does not
+		// support the HELLO command.
+		// The server may be a redis-server that does not support HELLO, or it could be
+		// DragonflyDB or a third-party redis-proxy. They all respond with different
+		// error strings for unsupported commands, which makes it difficult to rely on
+		// the error text to cover every case.
+ return err
+ }
+
+ _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
+ if !auth && password != "" {
+ if username != "" {
+ pipe.AuthACL(ctx, username, password)
+ } else {
+ pipe.Auth(ctx, password)
+ }
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(ctx, c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly(ctx)
+ }
+
+ if c.opt.ClientName != "" {
+ pipe.ClientSetName(ctx, c.opt.ClientName)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if !c.opt.DisableIndentity {
+ libName := ""
+ libVer := Version()
+ if c.opt.IdentitySuffix != "" {
+ libName = c.opt.IdentitySuffix
+ }
+ p := conn.Pipeline()
+ p.ClientSetInfo(ctx, WithLibraryName(libName))
+ p.ClientSetInfo(ctx, WithLibraryVersion(libVer))
+ _, _ = p.Exec(ctx)
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(ctx, conn)
+ }
+ return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false, c.opt.Addr) {
+ c.connPool.Remove(ctx, cn, err)
+ } else {
+ c.connPool.Put(ctx, cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+
+ var fnErr error
+ defer func() {
+ c.releaseConn(ctx, cn, fnErr)
+ }()
+
+ fnErr = fn(ctx, cn)
+
+ return fnErr
+}
+
+func (c *baseClient) dial(ctx context.Context, network, addr string) (net.Conn, error) {
+ return c.opt.Dialer(ctx, network, addr)
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ attempt := attempt
+
+ retry, err := c._process(ctx, cmd, attempt)
+ if err == nil || !retry {
+ return err
+ }
+
+ lastErr = err
+ }
+ return lastErr
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return false, err
+ }
+ }
+
+ retryTimeout := uint32(0)
+ if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ }); err != nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ return err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), cmd.readReply); err != nil {
+ if cmd.readTimeout() == nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ } else {
+ atomic.StoreUint32(&retryTimeout, 0)
+ }
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+ return retry, err
+ }
+
+ return false, nil
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds); err != nil {
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ // Enable retries by default to retry dial errors returned by withConn.
+ canRetry := true
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ }); err != nil {
+ return true, err
+ }
+
+ return false, nil
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for i, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+ if err != nil && !isRedisError(err) {
+ setCmdsErr(cmds[i+1:], err)
+ return err
+ }
+ }
+ // Retry errors like "LOADING redis is loading the dataset in memory".
+ return cmds[0].Err()
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ }); err != nil {
+ setCmdsErr(cmds, err)
+ return true, err
+ }
+
+ if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ trimmedCmds := cmds[1 : len(cmds)-1]
+
+ if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return pipelineReadCmds(rd, trimmedCmds)
+ }); err != nil {
+ return false, err
+ }
+
+ return false, nil
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse +OK.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ // Parse +QUEUED.
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ if line[0] != proto.RespArray {
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *baseClient) context(ctx context.Context) context.Context {
+ if c.opt.ContextTimeoutEnabled {
+ return ctx
+ }
+ return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more underlying connections.
+// It's safe for concurrent use by multiple goroutines.
+//
+// Client creates and frees connections automatically; it also maintains a free pool
+// of idle connections. You can control the pool size with the Options.PoolSize option.
+type Client struct {
+ *baseClient
+ cmdable
+ hooksMixin
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+
+ c := Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ c.init()
+ c.connPool = newConnPool(opt, c.dialHook)
+
+ return &c
+}
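+
+// Example (an illustrative sketch; the address is a placeholder and ctx is
+// assumed to be a context.Context):
+//
+//	rdb := NewClient(&Options{
+//		Addr:     "localhost:6379",
+//		Password: "", // no password set
+//		DB:       0,  // use the default DB
+//	})
+//	if err := rdb.Ping(ctx).Err(); err != nil {
+//		// handle error
+//	}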
+
+func (c *Client) init() {
+ c.cmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := *c
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ clone.init()
+ return &clone
+}
+
+func (c *Client) Conn() *Conn {
+ return newConn(c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
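+
+// Example (an illustrative sketch; the key name is made up):
+//
+//	cmds, err := rdb.TxPipelined(ctx, func(pipe Pipeliner) error {
+//		pipe.Incr(ctx, "counter")
+//		pipe.Expire(ctx, "counter", time.Hour)
+//		return nil
+//	})
+//	// After EXEC, cmds holds the queued commands with their replies filled in.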
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+// sub := client.Subscribe(queryResp)
+// iface, err := sub.Receive()
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+// Channels can be omitted to create an empty subscription.
+func (c *Client) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.SSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
+type Conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooksMixin
+}
+
+func newConn(opt *Options, connPool pool.Pooler) *Conn {
+ c := Conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ }
+
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
+
+ return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.processPipelineHook,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
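+
+// Example (an illustrative sketch of a stateful command that needs a single
+// connection; the connection name is made up):
+//
+//	conn := rdb.Conn()
+//	defer conn.Close()
+//	if err := conn.ClientSetName(ctx, "worker-1").Err(); err != nil {
+//		// handle error
+//	}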
diff --git a/vendor/github.com/go-redis/redis/v8/result.go b/vendor/github.com/redis/go-redis/v9/result.go
similarity index 87%
rename from vendor/github.com/go-redis/redis/v8/result.go
rename to vendor/github.com/redis/go-redis/v9/result.go
index 24cfd49940..cfd4cf92ed 100644
--- a/vendor/github.com/go-redis/redis/v8/result.go
+++ b/vendor/github.com/redis/go-redis/v9/result.go
@@ -82,17 +82,17 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
return &cmd
}
-// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
-func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
- var cmd StringStringMapCmd
+// NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing.
+func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd {
+ var cmd MapStringStringCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
}
-// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
-func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
- var cmd StringIntMapCmd
+// NewMapStringIntCmdResult returns a MapStringIntCmd initialised with val and err for testing.
+func NewMapStringIntCmdResult(val map[string]int64, err error) *MapStringIntCmd {
+ var cmd MapStringIntCmd
cmd.val = val
cmd.SetErr(err)
return &cmd
@@ -114,7 +114,7 @@ func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
return &cmd
}
-// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing.
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
var cmd ZWithKeyCmd
cmd.val = val
@@ -178,3 +178,11 @@ func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
cmd.SetErr(err)
return &cmd
}
+
+// NewXPendingResult returns a XPendingCmd initialised with val and err for testing.
+func NewXPendingResult(val *XPending, err error) *XPendingCmd {
+ var cmd XPendingCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/ring.go b/vendor/github.com/redis/go-redis/v9/ring.go
new file mode 100644
index 0000000000..4ae00542ba
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/ring.go
@@ -0,0 +1,827 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/cespare/xxhash/v2"
+ "github.com/dgryski/go-rendezvous" //nolint
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/hashtag"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+ Get(string) string
+}
+
+type rendezvousWrapper struct {
+ *rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+ return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+ return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
+
+//------------------------------------------------------------------------------
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+ // Map of name => host:port addresses of ring shards.
+ Addrs map[string]string
+
+ // NewClient creates a shard client with provided options.
+ NewClient func(opt *Options) *Client
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // Frequency of PING commands sent to check shards availability.
+ // Shard is considered down after 3 subsequent failed checks.
+ HeartbeatFrequency time.Duration
+
+ // NewConsistentHash returns a consistent hash that is used
+ // to distribute keys across the shards.
+ //
+ // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+ // for consistent hashing algorithmic tradeoffs.
+ NewConsistentHash func(shards []string) ConsistentHash
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+ Limiter Limiter
+
+ DisableIndentity bool
+ IdentitySuffix string
+}
+
+func (opt *RingOptions) init() {
+ if opt.NewClient == nil {
+ opt.NewClient = func(opt *Options) *Client {
+ return NewClient(opt)
+ }
+ }
+
+ if opt.HeartbeatFrequency == 0 {
+ opt.HeartbeatFrequency = 500 * time.Millisecond
+ }
+
+ if opt.NewConsistentHash == nil {
+ opt.NewConsistentHash = newRendezvous
+ }
+
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+ return &Options{
+ ClientName: opt.ClientName,
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+ DB: opt.DB,
+
+ MaxRetries: -1,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+ Limiter: opt.Limiter,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+ Client *Client
+ down int32
+ addr string
+}
+
+func newRingShard(opt *RingOptions, addr string) *ringShard {
+ clopt := opt.clientOptions()
+ clopt.Addr = addr
+
+ return &ringShard{
+ Client: opt.NewClient(clopt),
+ addr: addr,
+ }
+}
+
+func (shard *ringShard) String() string {
+ var state string
+ if shard.IsUp() {
+ state = "up"
+ } else {
+ state = "down"
+ }
+ return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+ const threshold = 3
+ return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+ return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+ if up {
+ changed := shard.IsDown()
+ atomic.StoreInt32(&shard.down, 0)
+ return changed
+ }
+
+ if shard.IsDown() {
+ return false
+ }
+
+ atomic.AddInt32(&shard.down, 1)
+ return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringSharding struct {
+ opt *RingOptions
+
+ mu sync.RWMutex
+ shards *ringShards
+ closed bool
+ hash ConsistentHash
+ numShard int
+ onNewNode []func(rdb *Client)
+
+ // ensures exclusive access to SetAddrs so there is no need
+ // to hold mu for the duration of potentially long shard creation
+ setAddrsMu sync.Mutex
+}
+
+type ringShards struct {
+ m map[string]*ringShard
+ list []*ringShard
+}
+
+func newRingSharding(opt *RingOptions) *ringSharding {
+ c := &ringSharding{
+ opt: opt,
+ }
+ c.SetAddrs(opt.Addrs)
+
+ return c
+}
+
+func (c *ringSharding) OnNewNode(fn func(rdb *Client)) {
+ c.mu.Lock()
+ c.onNewNode = append(c.onNewNode, fn)
+ c.mu.Unlock()
+}
+
+// SetAddrs replaces the shards in use, so that the number of shards can be
+// increased or decreased. It reuses shards that existed before and closes
+// the ones that will not be used anymore.
+func (c *ringSharding) SetAddrs(addrs map[string]string) {
+ c.setAddrsMu.Lock()
+ defer c.setAddrsMu.Unlock()
+
+ cleanup := func(shards map[string]*ringShard) {
+ for addr, shard := range shards {
+ if err := shard.Client.Close(); err != nil {
+ internal.Logger.Printf(context.Background(), "shard.Close %s failed: %s", addr, err)
+ }
+ }
+ }
+
+ c.mu.RLock()
+ if c.closed {
+ c.mu.RUnlock()
+ return
+ }
+ existing := c.shards
+ c.mu.RUnlock()
+
+ shards, created, unused := c.newRingShards(addrs, existing)
+
+ c.mu.Lock()
+ if c.closed {
+ cleanup(created)
+ c.mu.Unlock()
+ return
+ }
+ c.shards = shards
+ c.rebalanceLocked()
+ c.mu.Unlock()
+
+ cleanup(unused)
+}
+
+func (c *ringSharding) newRingShards(
+ addrs map[string]string, existing *ringShards,
+) (shards *ringShards, created, unused map[string]*ringShard) {
+ shards = &ringShards{m: make(map[string]*ringShard, len(addrs))}
+ created = make(map[string]*ringShard) // indexed by addr
+ unused = make(map[string]*ringShard) // indexed by addr
+
+ if existing != nil {
+ for _, shard := range existing.list {
+ unused[shard.addr] = shard
+ }
+ }
+
+ for name, addr := range addrs {
+ if shard, ok := unused[addr]; ok {
+ shards.m[name] = shard
+ delete(unused, addr)
+ } else {
+ shard := newRingShard(c.opt, addr)
+ shards.m[name] = shard
+ created[addr] = shard
+
+ for _, fn := range c.onNewNode {
+ fn(shard.Client)
+ }
+ }
+ }
+
+ for _, shard := range shards.m {
+ shards.list = append(shards.list, shard)
+ }
+
+ return
+}
+
+func (c *ringSharding) List() []*ringShard {
+ var list []*ringShard
+
+ c.mu.RLock()
+ if !c.closed {
+ list = c.shards.list
+ }
+ c.mu.RUnlock()
+
+ return list
+}
+
+func (c *ringSharding) Hash(key string) string {
+ key = hashtag.Key(key)
+
+ var hash string
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.numShard > 0 {
+ hash = c.hash.Get(key)
+ }
+
+ return hash
+}
+
+func (c *ringSharding) GetByKey(key string) (*ringShard, error) {
+ key = hashtag.Key(key)
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if c.numShard == 0 {
+ return nil, errRingShardsDown
+ }
+
+ shardName := c.hash.Get(key)
+ if shardName == "" {
+ return nil, errRingShardsDown
+ }
+ return c.shards.m[shardName], nil
+}
+
+func (c *ringSharding) GetByName(shardName string) (*ringShard, error) {
+ if shardName == "" {
+ return c.Random()
+ }
+
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ return c.shards.m[shardName], nil
+}
+
+func (c *ringSharding) Random() (*ringShard, error) {
+ return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ var rebalance bool
+
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(ctx, "ring shard state changed: %s", shard)
+ rebalance = true
+ }
+ }
+
+ if rebalance {
+ c.mu.Lock()
+ c.rebalanceLocked()
+ c.mu.Unlock()
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// rebalanceLocked removes dead shards from the Ring.
+// Requires c.mu to be held.
+func (c *ringSharding) rebalanceLocked() {
+ if c.closed {
+ return
+ }
+ if c.shards == nil {
+ return
+ }
+
+ liveShards := make([]string, 0, len(c.shards.m))
+
+ for name, shard := range c.shards.m {
+ if shard.IsUp() {
+ liveShards = append(liveShards, name)
+ }
+ }
+
+ c.hash = c.opt.NewConsistentHash(liveShards)
+ c.numShard = len(liveShards)
+}
+
+func (c *ringSharding) Len() int {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ return c.numShard
+}
+
+func (c *ringSharding) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+
+ for _, shard := range c.shards.list {
+ if err := shard.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.hash = nil
+ c.shards = nil
+ c.numShard = 0
+
+ return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+ cmdable
+ hooksMixin
+
+ opt *RingOptions
+ sharding *ringSharding
+ cmdsInfoCache *cmdsInfoCache
+ heartbeatCancelFn context.CancelFunc
+}
+
+func NewRing(opt *RingOptions) *Ring {
+ opt.init()
+
+ hbCtx, hbCancel := context.WithCancel(context.Background())
+
+ ring := Ring{
+ opt: opt,
+ sharding: newRingSharding(opt),
+ heartbeatCancelFn: hbCancel,
+ }
+
+ ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+ ring.cmdable = ring.Process
+
+ ring.initHooks(hooks{
+ process: ring.process,
+ pipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, false)
+ },
+ txPipeline: func(ctx context.Context, cmds []Cmder) error {
+ return ring.generalProcessPipeline(ctx, cmds, true)
+ },
+ })
+
+ go ring.sharding.Heartbeat(hbCtx, opt.HeartbeatFrequency)
+
+ return &ring
+}
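+
+// Usage sketch (illustrative only; shard names and addresses below are
+// placeholders): create a Ring over two shards and issue a command. Keys
+// are distributed across the shards by consistent hashing.
+//
+//	rdb := NewRing(&RingOptions{
+//		Addrs: map[string]string{
+//			"shard1": "localhost:6379",
+//			"shard2": "localhost:6380",
+//		},
+//	})
+//	err := rdb.Set(context.Background(), "key", "value", 0).Err()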
+
+func (c *Ring) SetAddrs(addrs map[string]string) {
+ c.sharding.SetAddrs(addrs)
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+ return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+ shards := c.sharding.List()
+ var acc PoolStats
+ for _, shard := range shards {
+ s := shard.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ }
+ return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+ return c.sharding.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.Subscribe(ctx, channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.PSubscribe(ctx, channels...)
+}
+
+// SSubscribe subscribes the client to the specified shard channels.
+func (c *Ring) SSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+ shard, err := c.sharding.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.SSubscribe(ctx, channels...)
+}
+
+func (c *Ring) OnNewNode(fn func(rdb *Client)) {
+ c.sharding.OnNewNode(fn)
+}
+
+// ForEachShard concurrently calls fn on each live shard in the ring.
+// It returns the first error, if any.
+func (c *Ring) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ shards := c.sharding.List()
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, shard := range shards {
+ if shard.IsDown() {
+ continue
+ }
+
+ wg.Add(1)
+ go func(shard *ringShard) {
+ defer wg.Done()
+ err := fn(ctx, shard.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(shard)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
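+
+// Sketch (illustrative; assumes rdb is a *Ring created elsewhere): ping every
+// live shard concurrently and surface the first failure.
+//
+//	err := rdb.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+//		return shard.Ping(ctx).Err()
+//	})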
+
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ shards := c.sharding.List()
+ var firstErr error
+ for _, shard := range shards {
+ cmdsInfo, err := shard.Client.Command(ctx).Result()
+ if err == nil {
+ return cmdsInfo, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ if firstErr == nil {
+ return nil, errRingShardsDown
+ }
+ return nil, firstErr
+}
+
+func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
+ pos := cmdFirstKeyPos(cmd)
+ if pos == 0 {
+ return c.sharding.Random()
+ }
+ firstKey := cmd.stringArg(pos)
+ return c.sharding.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ shard, err := c.cmdShard(ctx, cmd)
+ if err != nil {
+ return err
+ }
+
+ lastErr = shard.Client.Process(ctx, cmd)
+ if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: pipelineExecer(c.processPipelineHook),
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
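+
+// Pipelining sketch (illustrative): queue several commands and send them in
+// one round trip per shard; commands that hash to different shards are
+// executed by their shards concurrently (see generalProcessPipeline below).
+//
+//	cmds, err := rdb.Pipelined(ctx, func(pipe Pipeliner) error {
+//		pipe.Incr(ctx, "counter")
+//		pipe.Expire(ctx, "counter", time.Hour)
+//		return nil
+//	})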
+
+func (c *Ring) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, tx bool,
+) error {
+ if tx {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+ }
+
+ cmdsMap := make(map[string][]Cmder)
+
+ for _, cmd := range cmds {
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd))
+ if hash != "" {
+ hash = c.sharding.Hash(hash)
+ }
+ cmdsMap[hash] = append(cmdsMap[hash], cmd)
+ }
+
+ var wg sync.WaitGroup
+ for hash, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(hash string, cmds []Cmder) {
+ defer wg.Done()
+
+ // TODO: retry?
+ shard, err := c.sharding.GetByName(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return
+ }
+
+ if tx {
+ cmds = wrapMultiExec(ctx, cmds)
+ _ = shard.Client.processTxPipelineHook(ctx, cmds)
+ } else {
+ _ = shard.Client.processPipelineHook(ctx, cmds)
+ }
+ }(hash, cmds)
+ }
+
+ wg.Wait()
+ return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ var shards []*ringShard
+
+ for _, key := range keys {
+ if key != "" {
+ shard, err := c.sharding.GetByKey(hashtag.Key(key))
+ if err != nil {
+ return err
+ }
+
+ shards = append(shards, shard)
+ }
+ }
+
+ if len(shards) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one shard")
+ }
+
+ if len(shards) > 1 {
+ for _, shard := range shards[1:] {
+ if shard.Client != shards[0].Client {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+ return err
+ }
+ }
+ }
+
+ return shards[0].Client.Watch(ctx, fn, keys...)
+}
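+
+// Watch sketch (illustrative): all watched keys must map to the same shard,
+// so hash tags ("{user1}") are used to force both operations onto one shard.
+//
+//	err := rdb.Watch(ctx, func(tx *Tx) error {
+//		n, err := tx.Get(ctx, "{user1}.balance").Int64()
+//		if err != nil && err != Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe Pipeliner) error {
+//			pipe.Set(ctx, "{user1}.balance", n+1, 0)
+//			return nil
+//		})
+//		return err
+//	}, "{user1}.balance")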
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ c.heartbeatCancelFn()
+
+ return c.sharding.Close()
+}
diff --git a/vendor/github.com/redis/go-redis/v9/script.go b/vendor/github.com/redis/go-redis/v9/script.go
new file mode 100644
index 0000000000..626ab03bb5
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/script.go
@@ -0,0 +1,84 @@
+package redis
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+)
+
+type Scripter interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+ _ Scripter = (*Client)(nil)
+ _ Scripter = (*Ring)(nil)
+ _ Scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ _, _ = io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
+ return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
+ return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalRO(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+func (s *Script) EvalShaRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalShaRO(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist, it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(ctx, c, keys, args...)
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
+ return s.Eval(ctx, c, keys, args...)
+ }
+ return r
+}
+
+// RunRO optimistically uses EVALSHA_RO to run the script. If the script does
+// not exist, it is retried using EVAL_RO.
+func (s *Script) RunRO(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalShaRO(ctx, c, keys, args...)
+ if HasErrorPrefix(r.Err(), "NOSCRIPT") {
+ return s.EvalRO(ctx, c, keys, args...)
+ }
+ return r
+}
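+
+// Usage sketch (illustrative; the script body is an example): Run sends
+// EVALSHA first and falls back to EVAL the first time the script is not yet
+// cached on the server.
+//
+//	var incrBy = NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)
+//
+//	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 2).Int64()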
diff --git a/vendor/github.com/redis/go-redis/v9/scripting_commands.go b/vendor/github.com/redis/go-redis/v9/scripting_commands.go
new file mode 100644
index 0000000000..af9c3397bf
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/scripting_commands.go
@@ -0,0 +1,215 @@
+package redis
+
+import "context"
+
+type ScriptingFunctionsCmdable interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ FunctionLoad(ctx context.Context, code string) *StringCmd
+ FunctionLoadReplace(ctx context.Context, code string) *StringCmd
+ FunctionDelete(ctx context.Context, libName string) *StringCmd
+ FunctionFlush(ctx context.Context) *StringCmd
+ FunctionKill(ctx context.Context) *StringCmd
+ FunctionFlushAsync(ctx context.Context) *StringCmd
+ FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd
+ FunctionDump(ctx context.Context) *StringCmd
+ FunctionRestore(ctx context.Context, libDump string) *StringCmd
+ FunctionStats(ctx context.Context) *FunctionStatsCmd
+ FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+ FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd
+}
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval", script, keys, args...)
+}
+
+func (c cmdable) EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "eval_ro", script, keys, args...)
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha", sha1, keys, args...)
+}
+
+func (c cmdable) EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ return c.eval(ctx, "evalsha_ro", sha1, keys, args...)
+}
+
+func (c cmdable) eval(ctx context.Context, name, payload string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = name
+ cmdArgs[1] = payload
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+
+ // It is possible that only args exist, without a key. For example:
+ // rdb.eval(ctx, eval, script, nil, arg1, arg2)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
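+
+// Call sketch (illustrative): EVAL with one key and one extra argument. The
+// wire layout is "eval" script numkeys key..., so the first key sits at
+// position 3, matching SetFirstKeyPos above.
+//
+//	val, err := rdb.Eval(ctx, "return {KEYS[1], ARGV[1]}", []string{"key1"}, "hello").Result()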
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ------------------------------------------------------------------------------
+
+// FunctionListQuery is used with FunctionList to query for Redis libraries.
+//
+// LibraryNamePattern - Use an empty string to get all libraries.
+//   - Use a glob-style pattern to match multiple libraries with a matching name.
+//   - Use a library's full name to match a single library.
+//
+// WithCode - If true, it will also return the code of the library.
+type FunctionListQuery struct {
+ LibraryNamePattern string
+ WithCode bool
+}
+
+func (c cmdable) FunctionLoad(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionLoadReplace(ctx context.Context, code string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "load", "replace", code)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDelete(ctx context.Context, libName string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "delete", libName)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlush(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionKill(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionFlushAsync(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "flush", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd {
+ args := make([]interface{}, 2, 5)
+ args[0] = "function"
+ args[1] = "list"
+ if q.LibraryNamePattern != "" {
+ args = append(args, "libraryname", q.LibraryNamePattern)
+ }
+ if q.WithCode {
+ args = append(args, "withcode")
+ }
+ cmd := NewFunctionListCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionDump(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "dump")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionRestore(ctx context.Context, libDump string) *StringCmd {
+ cmd := NewStringCmd(ctx, "function", "restore", libDump)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FunctionStats(ctx context.Context) *FunctionStatsCmd {
+ cmd := NewFunctionStatsCmd(ctx, "function", "stats")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// FCallRo simply calls FCallRO.
+//
+// Deprecated: Use FCallRO instead; this alias is kept to maintain the naming convention.
+func (c cmdable) FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ return c.FCallRO(ctx, function, keys, args...)
+}
+
+func (c cmdable) FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := fcallArgs("fcall_ro", function, keys, args...)
+ cmd := NewCmd(ctx, cmdArgs...)
+ if len(keys) > 0 {
+ cmd.SetFirstKeyPos(3)
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func fcallArgs(command string, function string, keys []string, args ...interface{}) []interface{} {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = command
+ cmdArgs[1] = function
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+
+ cmdArgs = append(cmdArgs, args...)
+ return cmdArgs
+}
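+
+// Call sketch (illustrative; "mylib", "myfunc" and libCode are placeholders):
+// FCALL invokes a function previously registered with FUNCTION LOAD.
+//
+//	// libCode is a library source starting with "#!lua name=mylib".
+//	_ = rdb.FunctionLoad(ctx, libCode)
+//	val, err := rdb.FCall(ctx, "myfunc", []string{"key1"}, "arg1").Result()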
diff --git a/vendor/github.com/redis/go-redis/v9/sentinel.go b/vendor/github.com/redis/go-redis/v9/sentinel.go
new file mode 100644
index 0000000000..188f88494e
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/sentinel.go
@@ -0,0 +1,838 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/redis/go-redis/v9/internal"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // If specified with SentinelPassword, enables ACL-based authentication (via
+ // AUTH <user> <pass>).
+ SentinelUsername string
+ // Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+ // configuration, or, if SentinelUsername is also supplied, used for ACL-based
+ // authentication.
+ SentinelPassword string
+
+ // Allows routing read-only commands to the closest master or replica node.
+ // This option only works with NewFailoverClusterClient.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or replica node.
+ // This option only works with NewFailoverClusterClient.
+ RouteRandomly bool
+
+ // Route all commands to read-only replica nodes.
+ ReplicaOnly bool
+
+ // Use replicas that are disconnected from the master when no connected
+ // replicas are available.
+ // Currently, this option only works in the RandomReplicaAddr function.
+ UseDisconnectedReplicas bool
+
+ // The following options are copied from the Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+
+ DisableIndentity bool
+ IdentitySuffix string
+}
+
+func (opt *FailoverOptions) clientOptions() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ }
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+ return &Options{
+ Addr: addr,
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: 0,
+ Username: opt.SentinelUsername,
+ Password: opt.SentinelPassword,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ }
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+ return &ClusterOptions{
+ ClientName: opt.ClientName,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Protocol: opt.Protocol,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRedirects: opt.MaxRetries,
+
+ RouteByLatency: opt.RouteByLatency,
+ RouteRandomly: opt.RouteRandomly,
+
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+ ContextTimeoutEnabled: opt.ContextTimeoutEnabled,
+
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ MinIdleConns: opt.MinIdleConns,
+ MaxIdleConns: opt.MaxIdleConns,
+ MaxActiveConns: opt.MaxActiveConns,
+ ConnMaxIdleTime: opt.ConnMaxIdleTime,
+ ConnMaxLifetime: opt.ConnMaxLifetime,
+
+ TLSConfig: opt.TLSConfig,
+
+ DisableIndentity: opt.DisableIndentity,
+ IdentitySuffix: opt.IdentitySuffix,
+ }
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ if failoverOpt.RouteByLatency {
+ panic("to route commands by latency, use NewFailoverClusterClient")
+ }
+ if failoverOpt.RouteRandomly {
+ panic("to route commands randomly, use NewFailoverClusterClient")
+ }
+
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+ sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+ })
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clientOptions()
+ opt.Dialer = masterReplicaDialer(failover)
+ opt.init()
+
+ var connPool *pool.ConnPool
+
+ rdb := &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+ rdb.init()
+
+ connPool = newConnPool(opt, rdb.dialHook)
+ rdb.connPool = connPool
+ rdb.onClose = failover.Close
+
+ failover.mu.Lock()
+ failover.onFailover = func(ctx context.Context, addr string) {
+ _ = connPool.Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
+ })
+ }
+ failover.mu.Unlock()
+
+ return rdb
+}
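+
+// Usage sketch (illustrative; the master name and address are placeholders):
+//
+//	rdb := NewFailoverClient(&FailoverOptions{
+//		MasterName:    "mymaster",
+//		SentinelAddrs: []string{"localhost:26379"},
+//	})
+//	err := rdb.Ping(context.Background()).Err()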
+
+func masterReplicaDialer(
+ failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
+ var addr string
+ var err error
+
+ if failover.opt.ReplicaOnly {
+ addr, err = failover.RandomReplicaAddr(ctx)
+ } else {
+ addr, err = failover.MasterAddr(ctx)
+ if err == nil {
+ failover.trySwitchMaster(ctx, addr)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ if failover.opt.Dialer != nil {
+ return failover.opt.Dialer(ctx, network, addr)
+ }
+
+ netDialer := &net.Dialer{
+ Timeout: failover.opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if failover.opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+ *baseClient
+ hooksMixin
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+ opt.init()
+ c := &SentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ },
+ }
+
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ })
+ c.connPool = newConnPool(opt, c.dialHook)
+
+ return c
+}
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "ping")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "sentinels", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Failover forces a failover as if the master was not reachable, and without
+// asking for agreement from other Sentinels.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Reset resets all the masters with matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every replica and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+ cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *MapStringStringCmd {
+ cmd := NewMapStringStringCmd(ctx, "sentinel", "master", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "masters")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Replicas shows a list of replicas for the specified master and their state.
+func (c *SentinelClient) Replicas(ctx context.Context, name string) *MapStringStringSliceCmd {
+ cmd := NewMapStringStringSliceCmd(ctx, "sentinel", "replicas", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is healthy.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Set is used in order to change configuration parameters of a specific master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Remove is used in order to remove the specified master: the master will no
+// longer be monitored, and will be completely removed from the internal state
+// of the Sentinel.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
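+
+// Usage sketch (illustrative; the address and master name are placeholders):
+// query a Sentinel directly for the current master address.
+//
+//	sc := NewSentinelClient(&Options{Addr: "localhost:26379"})
+//	addr, err := sc.GetMasterAddrByName(ctx, "mymaster").Result() // ["host", "port"]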
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+ opt *FailoverOptions
+
+ sentinelAddrs []string
+
+ onFailover func(ctx context.Context, addr string)
+ onUpdate func(ctx context.Context)
+
+ mu sync.RWMutex
+ _masterAddr string
+ sentinel *SentinelClient
+ pubsub *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.sentinel != nil {
+ return c.closeSentinel()
+ }
+ return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+ firstErr := c.pubsub.Close()
+ c.pubsub = nil
+
+ err := c.sentinel.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ c.sentinel = nil
+
+ return firstErr
+}
+
+func (c *sentinelFailover) RandomReplicaAddr(ctx context.Context) (string, error) {
+ if c.opt == nil {
+ return "", errors.New("opt is nil")
+ }
+
+ addresses, err := c.replicaAddrs(ctx, false)
+ if err != nil {
+ return "", err
+ }
+
+ if len(addresses) == 0 && c.opt.UseDisconnectedReplicas {
+ addresses, err = c.replicaAddrs(ctx, true)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if len(addresses) == 0 {
+ return c.MasterAddr(ctx)
+ }
+ return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addr, err := c.getMasterAddr(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
+ return addr, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addr, err := c.getMasterAddr(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else {
+ return addr, nil
+ }
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ _ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return "", err
+ }
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
+ c.opt.MasterName, err)
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addrs, err := c.getReplicaAddrs(ctx, sentinel)
+ if err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
+ return addrs, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addrs, err := c.getReplicaAddrs(ctx, c.sentinel)
+ if err != nil {
+ _ = c.closeSentinel()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ // Continue on other errors
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ } else if len(addrs) > 0 {
+ return addrs, nil
+ } else {
+ // No error and no replicas.
+ _ = c.closeSentinel()
+ }
+ }
+
+ var sentinelReachable bool
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ replicas, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ _ = sentinel.Close()
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+ internal.Logger.Printf(ctx, "sentinel: Replicas master=%q failed: %s",
+ c.opt.MasterName, err)
+ continue
+ }
+ sentinelReachable = true
+ addrs := parseReplicaAddrs(replicas, useDisconnected)
+ if len(addrs) == 0 {
+ continue
+ }
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ return addrs, nil
+ }
+
+ if sentinelReachable {
+ return []string{}, nil
+ }
+ return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) (string, error) {
+ addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ return "", err
+ }
+ return net.JoinHostPort(addr[0], addr[1]), nil
+}
+
+func (c *sentinelFailover) getReplicaAddrs(ctx context.Context, sentinel *SentinelClient) ([]string, error) {
+ addrs, err := sentinel.Replicas(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s",
+ c.opt.MasterName, err)
+ return nil, err
+ }
+ return parseReplicaAddrs(addrs, false), nil
+}
+
+func parseReplicaAddrs(addrs []map[string]string, keepDisconnected bool) []string {
+ nodes := make([]string, 0, len(addrs))
+ for _, node := range addrs {
+ isDown := false
+ if flags, ok := node["flags"]; ok {
+ for _, flag := range strings.Split(flags, ",") {
+ switch flag {
+ case "s_down", "o_down":
+ isDown = true
+ case "disconnected":
+ if !keepDisconnected {
+ isDown = true
+ }
+ }
+ }
+ }
+ if !isDown && node["ip"] != "" && node["port"] != "" {
+ nodes = append(nodes, net.JoinHostPort(node["ip"], node["port"]))
+ }
+ }
+
+ return nodes
+}
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+ c.mu.RLock()
+ currentAddr := c._masterAddr //nolint:ifshort
+ c.mu.RUnlock()
+
+ if addr == currentAddr {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if addr == c._masterAddr {
+ return
+ }
+ c._masterAddr = addr
+
+ internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+ c.opt.MasterName, addr)
+ if c.onFailover != nil {
+ c.onFailover(ctx, addr)
+ }
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+ if c.sentinel != nil {
+ panic("not reached")
+ }
+ c.sentinel = sentinel
+ c.discoverSentinels(ctx)
+
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+replica-reconf-done")
+ go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+ sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ ip, ok := sentinel["ip"]
+ if !ok {
+ continue
+ }
+ port, ok := sentinel["port"]
+ if !ok {
+ continue
+ }
+ if ip != "" && port != "" {
+ sentinelAddr := net.JoinHostPort(ip, port)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.opt.MasterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+ ctx := context.TODO()
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+
+ ch := pubsub.Channel()
+ for msg := range ch {
+ if msg.Channel == "+switch-master" {
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != c.opt.MasterName {
+ internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ c.trySwitchMaster(pubsub.getContext(), addr)
+ }
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a replica node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clusterOptions()
+ opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+ masterAddr, err := failover.MasterAddr(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := []ClusterNode{{
+ Addr: masterAddr,
+ }}
+
+ replicaAddrs, err := failover.replicaAddrs(ctx, false)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, replicaAddr := range replicaAddrs {
+ nodes = append(nodes, ClusterNode{
+ Addr: replicaAddr,
+ })
+ }
+
+ slots := []ClusterSlot{
+ {
+ Start: 0,
+ End: 16383,
+ Nodes: nodes,
+ },
+ }
+ return slots, nil
+ }
+
+ c := NewClusterClient(opt)
+
+ failover.mu.Lock()
+ failover.onUpdate = func(ctx context.Context) {
+ c.ReloadState(ctx)
+ }
+ failover.mu.Unlock()
+
+ return c
+}
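+
+// Usage sketch (illustrative; values are placeholders): unlike
+// NewFailoverClient, this client may route read-only commands to replicas.
+//
+//	rdb := NewFailoverClusterClient(&FailoverOptions{
+//		MasterName:    "mymaster",
+//		SentinelAddrs: []string{"localhost:26379"},
+//		RouteRandomly: true,
+//	})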
diff --git a/vendor/github.com/redis/go-redis/v9/set_commands.go b/vendor/github.com/redis/go-redis/v9/set_commands.go
new file mode 100644
index 0000000000..cef8ad6d8b
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/set_commands.go
@@ -0,0 +1,217 @@
+package redis
+
+import "context"
+
+type SetCmdable interface {
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+ args := make([]interface{}, 4+len(keys))
+ args[0] = "sintercard"
+ numkeys := int64(0)
+ for i, key := range keys {
+ args[2+i] = key
+ numkeys++
+ }
+ args[1] = numkeys
+ args[2+numkeys] = "limit"
+ args[3+numkeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "smismember"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembersMap Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPop Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SPopN Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMember Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SRandMemberN Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
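+
+// Usage sketch (illustrative; key names are placeholders) for a few of the
+// set commands above:
+//
+//	rdb.SAdd(ctx, "colors", "red", "green", "blue")
+//	members, err := rdb.SMembers(ctx, "colors").Result() // []string
+//	n, err := rdb.SInterCard(ctx, 0, "colors", "warm-colors").Result() // limit 0 = unlimited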
diff --git a/vendor/github.com/redis/go-redis/v9/sortedset_commands.go b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go
new file mode 100644
index 0000000000..6701402703
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/sortedset_commands.go
@@ -0,0 +1,772 @@
+package redis
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+type SortedSetCmdable interface {
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZAdd(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd
+ ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+ ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+ ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+ ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd
+ ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+ ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+ ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+ ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd
+ ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd
+ ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+ ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+ ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+ ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// BZMPop is the blocking variant of ZMPOP.
+// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP.
+// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses.
+// A timeout of zero can be used to block indefinitely.
+// example: client.BZMPop(ctx, 0, "max", 1, "set")
+func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 3+len(keys), 6+len(keys))
+ args[0] = "bzmpop"
+ args[1] = formatSec(ctx, timeout)
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+ NX bool
+ XX bool
+ LT bool
+ GT bool
+ Ch bool
+ Members []Z
+}
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+ a := make([]interface{}, 0, 6+2*len(args.Members))
+ a = append(a, "zadd", key)
+
+ // The GT, LT and NX options are mutually exclusive.
+ if args.NX {
+ a = append(a, "nx")
+ } else {
+ if args.XX {
+ a = append(a, "xx")
+ }
+ if args.GT {
+ a = append(a, "gt")
+ } else if args.LT {
+ a = append(a, "lt")
+ }
+ }
+ if args.Ch {
+ a = append(a, "ch")
+ }
+ if incr {
+ a = append(a, "incr")
+ }
+ for _, m := range args.Members {
+ a = append(a, m.Score)
+ a = append(a, m.Member)
+ }
+ return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+ cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ Members: members,
+ })
+}
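+
+// Usage sketch (illustrative): add members with scores, then read them back
+// in ascending score order.
+//
+//	rdb.ZAdd(ctx, "board", Z{Score: 1, Member: "alice"}, Z{Score: 2, Member: "bob"})
+//	entries, err := rdb.ZRangeWithScores(ctx, "board", 0, -1).Result() // []Z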
+
+// ZAddLT Redis `ZADD key LT score member [score member ...]` command.
+func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ LT: true,
+ Members: members,
+ })
+}
+
+// ZAddGT Redis `ZADD key GT score member [score member ...]` command.
+func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ GT: true,
+ Members: members,
+ })
+}
+
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ NX: true,
+ Members: members,
+ })
+}
+
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd {
+ return c.ZAddArgs(ctx, key, ZAddArgs{
+ XX: true,
+ Members: members,
+ })
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinterstore", destination, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd {
+	numKeys := len(keys)
+	args := make([]interface{}, 4+numKeys)
+	args[0] = "zintercard"
+	args[1] = numKeys
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	args[2+numKeys] = "limit"
+	args[3+numKeys] = limit
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
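+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context). Per the Redis docs, LIMIT 0 counts the full
+// intersection, while a positive limit stops once that many common members
+// are found:
+//
+//	n, err := rdb.ZInterCard(ctx, 0, "zsetA", "zsetB").Result()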
+
+// ZMPop pops one or more elements with the highest or lowest score from the
+// first non-empty sorted set among the provided key names.
+// order: "max" (highest score) or "min" (lowest score); count must be > 0.
+// example: client.ZMPop(ctx, "max", 5, "set1", "set2")
+func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd {
+ args := make([]interface{}, 2+len(keys), 5+len(keys))
+ args[0] = "zmpop"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ args = append(args, strings.ToLower(order), "count", count)
+ cmd := NewZSliceWithKeyCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "zmscore"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewFloatSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRangeArgs is all the options of the ZRange command.
+// Starting with redis-server 6.2.0, ZRange can replace the following commands:
+//
+//	ZREVRANGE,
+//	ZRANGEBYSCORE,
+//	ZREVRANGEBYSCORE,
+//	ZRANGEBYLEX,
+//	ZREVRANGEBYLEX.
+//
+// Please pay attention to your redis-server version.
+//
+// The Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 or higher.
+type ZRangeArgs struct {
+	Key string
+
+	// When the ByScore option is provided, open (exclusive) score intervals can be set.
+	// By default, the score intervals specified by Start and Stop are closed (inclusive).
+	// It is similar to the ZRangeByScore command, which is deprecated as of 6.2.0.
+	// For example:
+	//	ZRangeArgs{
+	//		Key: "example-key",
+	//		Start: "(3",
+	//		Stop: 8,
+	//		ByScore: true,
+	//	}
+	// cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+	//
+	// For the ByLex option, it is similar to the ZRangeByLex command, which is deprecated as of 6.2.0.
+	// You can set the Start and Stop options as follows:
+	//	ZRangeArgs{
+	//		Key: "example-key",
+	//		Start: "[abc",
+	//		Stop: "(def",
+	//		ByLex: true,
+	//	}
+	// cmd: "ZRange example-key [abc (def ByLex"
+	//
+	// For normal cases (ByScore==false && ByLex==false), Start and Stop should be set to the index range (int).
+ // You can read the documentation for more information: https://redis.io/commands/zrange
+ Start interface{}
+ Stop interface{}
+
+ // The ByScore and ByLex options are mutually exclusive.
+ ByScore bool
+ ByLex bool
+
+ Rev bool
+
+ // limit offset count.
+ Offset int64
+ Count int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+	// For Rev+ByScore/ByLex, the positions of Start and Stop must be swapped.
+ if z.Rev && (z.ByScore || z.ByLex) {
+ args = append(args, z.Key, z.Stop, z.Start)
+ } else {
+ args = append(args, z.Key, z.Start, z.Stop)
+ }
+
+ if z.ByScore {
+ args = append(args, "byscore")
+ } else if z.ByLex {
+ args = append(args, "bylex")
+ }
+ if z.Rev {
+ args = append(args, "rev")
+ }
+ if z.Offset != 0 || z.Count != 0 {
+ args = append(args, "limit", z.Offset, z.Count)
+ }
+ return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.ZRangeArgs(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
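+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context). Min/Max use the ZRANGEBYSCORE interval syntax,
+// where "(" marks an exclusive bound and "-inf"/"+inf" are open-ended:
+//
+//	members, err := rdb.ZRangeByScore(ctx, "leaderboard", &redis.ZRangeBy{
+//		Min:   "(10",
+//		Max:   "+inf",
+//		Count: 5,
+//	}).Result()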
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrangestore", dst)
+ args = z.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRankWithScore according to the Redis documentation, if member does not exist
+// in the sorted set or key does not exist, it will return a redis.Nil error.
+func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRevRangeWithScores returns the members in the given index range, ordered
+// from highest to lowest score, together with their scores. A missing key
+// yields an empty result rather than an error.
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd {
+ cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunionstore", dest, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMember redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMemberWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrandmember", key, count, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+ args[len(keys)+2] = "withscores"
+
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffStore redis-server version >=6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 0, 3+len(keys))
+ args = append(args, "zdiffstore", destination, len(keys))
+ for _, key := range keys {
+ args = append(args, key)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Z represents sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents sorted set member including the name of the key where it was popped.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
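+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context): store the weighted union of two sorted sets,
+// keeping the maximum weighted score per member:
+//
+//	n, err := rdb.ZUnionStore(ctx, "dest", &redis.ZStore{
+//		Keys:      []string{"zsetA", "zsetB"},
+//		Weights:   []float64{2, 1},
+//		Aggregate: "MAX",
+//	}).Result()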
+
+func (z ZStore) len() (n int) {
+ n = len(z.Keys)
+ if len(z.Weights) > 0 {
+ n += 1 + len(z.Weights)
+ }
+ if z.Aggregate != "" {
+ n += 2
+ }
+ return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+ for _, key := range z.Keys {
+ args = append(args, key)
+ }
+ if len(z.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weights := range z.Weights {
+ args = append(args, weights)
+ }
+ }
+ if z.Aggregate != "" {
+ args = append(args, "aggregate", z.Aggregate)
+ }
+ return args
+}
diff --git a/vendor/github.com/redis/go-redis/v9/stream_commands.go b/vendor/github.com/redis/go-redis/v9/stream_commands.go
new file mode 100644
index 0000000000..0a9869202a
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/stream_commands.go
@@ -0,0 +1,438 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StreamCmdable interface {
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+ XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+ XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+ XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+ XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+ XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+ XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
+}
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of key-value pairs.
+// The MaxLen and MinID options are mutually exclusive; only one of them can be used.
+type XAddArgs struct {
+ Stream string
+ NoMkStream bool
+ MaxLen int64 // MAXLEN N
+ MinID string
+ // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+ Approx bool
+ Limit int64
+ ID string
+ Values interface{}
+}
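+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context): append an entry with an auto-generated ID ("*")
+// while approximately capping the stream length:
+//
+//	id, err := rdb.XAdd(ctx, &redis.XAddArgs{
+//		Stream: "events",
+//		MaxLen: 10000,
+//		Approx: true,
+//		Values: map[string]interface{}{"type": "click", "user": "alice"},
+//	}).Result()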
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 11)
+ args = append(args, "xadd", a.Stream)
+ if a.NoMkStream {
+ args = append(args, "nomkstream")
+ }
+ switch {
+ case a.MaxLen > 0:
+ if a.Approx {
+ args = append(args, "maxlen", "~", a.MaxLen)
+ } else {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ case a.MinID != "":
+ if a.Approx {
+ args = append(args, "minid", "~", a.MinID)
+ } else {
+ args = append(args, "minid", a.MinID)
+ }
+ }
+ if a.Limit > 0 {
+ args = append(args, "limit", a.Limit)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+}
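+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context). Streams interleaves stream names and last-seen IDs;
+// "$" means "only entries added after this call". Block of 0 waits
+// indefinitely, and a negative Block disables blocking:
+//
+//	streams, err := rdb.XRead(ctx, &redis.XReadArgs{
+//		Streams: []string{"events", "$"},
+//		Count:   10,
+//		Block:   time.Second,
+//	}).Result()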
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 6+len(a.Streams))
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
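+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context). The special ID ">" asks for entries never delivered
+// to any consumer of the group:
+//
+//	streams, err := rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
+//		Group:    "workers",
+//		Consumer: "worker-1",
+//		Streams:  []string{"events", ">"},
+//		Count:    10,
+//		Block:    time.Second,
+//	}).Result()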
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 10+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(4)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.SetFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Idle time.Duration
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
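+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context): list up to 10 pending entries of the group across
+// the whole ID range ("-" to "+"):
+//
+//	pending, err := rdb.XPendingExt(ctx, &redis.XPendingExtArgs{
+//		Stream: "events",
+//		Group:  "workers",
+//		Start:  "-",
+//		End:    "+",
+//		Count:  10,
+//	}).Result()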
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "xpending", a.Stream, a.Group)
+ if a.Idle != 0 {
+ args = append(args, "idle", formatMs(ctx, a.Idle))
+ }
+ args = append(args, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XAutoClaimArgs struct {
+ Stream string
+ Group string
+ MinIdle time.Duration
+ Start string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+ args := xAutoClaimArgs(ctx, a)
+ cmd := NewXAutoClaimCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+ args := xAutoClaimArgs(ctx, a)
+ args = append(args, "justid")
+ cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ return args
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
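+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client,
+// ctx a context.Context, and a hypothetical entry ID): take over messages
+// that have been idle for at least a minute in another consumer's PEL:
+//
+//	msgs, err := rdb.XClaim(ctx, &redis.XClaimArgs{
+//		Stream:   "events",
+//		Group:    "workers",
+//		Consumer: "worker-2",
+//		MinIdle:  time.Minute,
+//		Messages: []string{"1609459200000-0"},
+//	}).Result()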
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 5+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+// xTrim If approx is true, adds the "~" modifier; otherwise exact trimming
+// ("=", the redis default) is used.
+// example:
+//
+//	XTRIM key MAXLEN/MINID threshold LIMIT limit.
+//	XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+//
+// For redis-server versions below 6.2, limit must be set to 0.
+func (c cmdable) xTrim(
+ ctx context.Context, key, strategy string,
+ approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xtrim", key, strategy)
+ if approx {
+ args = append(args, "~")
+ }
+ args = append(args, threshold)
+ if limit > 0 {
+ args = append(args, "limit", limit)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XTrimMaxLen trims the stream to an exact maximum length; no `~` rule is
+// used, so `limit` cannot be used either.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+ return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
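+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context): approximate trimming lets Redis trim in whole
+// macro-nodes, which is cheaper than an exact MAXLEN; a limit of 0 sends no
+// LIMIT clause:
+//
+//	trimmed, err := rdb.XTrimMaxLenApprox(ctx, "events", 10000, 0).Result()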
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+ cmd := NewXInfoConsumersCmd(ctx, key, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+ args := make([]interface{}, 0, 6)
+ args = append(args, "xinfo", "stream", key, "full")
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewXInfoStreamFullCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/string_commands.go b/vendor/github.com/redis/go-redis/v9/string_commands.go
new file mode 100644
index 0000000000..eff5880dcd
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/string_commands.go
@@ -0,0 +1,303 @@
+package redis
+
+import (
+ "context"
+ "time"
+)
+
+type StringCmdable interface {
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+ GetDel(ctx context.Context, key string) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ LCS(ctx context.Context, q *LCSQuery) *LCSCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+ SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+ args := make([]interface{}, 0, 4)
+ args = append(args, "getex", key)
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == 0 {
+ args = append(args, "persist")
+ }
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetDel redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "getdel", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd {
+ cmd := NewLCSCmd(ctx, q)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSet(struct); for struct types, see the HSet description.
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
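+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context). The accepted shapes are interchangeable; the map
+// form does not preserve pair order:
+//
+//	err := rdb.MSet(ctx, map[string]interface{}{"k1": "v1", "k2": "v2"}).Err()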
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+// - MSetNX(struct); for struct types, see the HSet description.
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Set Redis `SET key value [expiration]` command.
+// Use expiration for `SETEx`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires
+// redis-server version >= 6.0, otherwise you will receive an error:
+// (error) ERR syntax error.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+	// Mode can be `NX` or `XX` or empty.
+	Mode string
+
+	// Zero `TTL` or `Expiration` means that the key has no expiration time.
+	TTL      time.Duration
+	ExpireAt time.Time
+
+	// When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+	Get bool
+
+	// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires
+	// redis-server version >= 6.0, otherwise you will receive an error:
+	// (error) ERR syntax error.
+	KeepTTL bool
+}
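+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client and
+// ctx a context.Context): set only if the key already exists (XX) and return
+// its previous value (Get; the GET option requires redis-server >= 6.2):
+//
+//	old, err := rdb.SetArgs(ctx, "session", "token-2", redis.SetArgs{
+//		Mode: "XX",
+//		TTL:  time.Hour,
+//		Get:  true,
+//	}).Result()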
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+ args := []interface{}{"set", key, value}
+
+ if a.KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ if !a.ExpireAt.IsZero() {
+ args = append(args, "exat", a.ExpireAt.Unix())
+ }
+ if a.TTL > 0 {
+ if usePrecise(a.TTL) {
+ args = append(args, "px", formatMs(ctx, a.TTL))
+ } else {
+ args = append(args, "ex", formatSec(ctx, a.TTL))
+ }
+ }
+
+ if a.Mode != "" {
+ args = append(args, a.Mode)
+ }
+
+ if a.Get {
+ args = append(args, "get")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetEx Redis `SETEx key expiration value` command.
+func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires
+// redis-server version >= 6.0, otherwise you will receive an error:
+// (error) ERR syntax error.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires
+// redis-server version >= 6.0, otherwise you will receive an error:
+// (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go
new file mode 100644
index 0000000000..6f1b2fa458
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go
@@ -0,0 +1,922 @@
+package redis
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/redis/go-redis/v9/internal/proto"
+)
+
+type TimeseriesCmdable interface {
+ TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd
+ TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd
+ TSCreate(ctx context.Context, key string) *StatusCmd
+ TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd
+ TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd
+ TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd
+ TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd
+ TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd
+ TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd
+ TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd
+ TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd
+ TSGet(ctx context.Context, key string) *TSTimestampValueCmd
+ TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd
+ TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd
+ TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd
+ TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd
+ TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd
+ TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd
+ TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd
+ TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd
+ TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd
+ TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd
+ TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd
+ TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd
+ TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd
+}
+
+type TSOptions struct {
+ Retention int
+ ChunkSize int
+ Encoding string
+ DuplicatePolicy string
+ Labels map[string]string
+}
+type TSIncrDecrOptions struct {
+ Timestamp int64
+ Retention int
+ ChunkSize int
+ Uncompressed bool
+ Labels map[string]string
+}
+
+type TSAlterOptions struct {
+ Retention int
+ ChunkSize int
+ DuplicatePolicy string
+ Labels map[string]string
+}
+
+type TSCreateRuleOptions struct {
+ alignTimestamp int64
+}
+
+type TSGetOptions struct {
+ Latest bool
+}
+
+type TSInfoOptions struct {
+ Debug bool
+}
+type Aggregator int
+
+const (
+ Invalid = Aggregator(iota)
+ Avg
+ Sum
+ Min
+ Max
+ Range
+ Count
+ First
+ Last
+ StdP
+ StdS
+ VarP
+ VarS
+ Twa
+)
+
+func (a Aggregator) String() string {
+ switch a {
+ case Invalid:
+ return ""
+ case Avg:
+ return "AVG"
+ case Sum:
+ return "SUM"
+ case Min:
+ return "MIN"
+ case Max:
+ return "MAX"
+ case Range:
+ return "RANGE"
+ case Count:
+ return "COUNT"
+ case First:
+ return "FIRST"
+ case Last:
+ return "LAST"
+ case StdP:
+ return "STD.P"
+ case StdS:
+ return "STD.S"
+ case VarP:
+ return "VAR.P"
+ case VarS:
+ return "VAR.S"
+ case Twa:
+ return "TWA"
+ default:
+ return ""
+ }
+}
+
+type TSRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+}
+
+type TSMRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMRevRangeOptions struct {
+ Latest bool
+ FilterByTS []int
+ FilterByValue []int
+ WithLabels bool
+ SelectedLabels []interface{}
+ Count int
+ Align interface{}
+ Aggregator Aggregator
+ BucketDuration int
+ BucketTimestamp interface{}
+ Empty bool
+ GroupByLabel interface{}
+ Reducer interface{}
+}
+
+type TSMGetOptions struct {
+ Latest bool
+ WithLabels bool
+ SelectedLabels []interface{}
+}
+
+// TSAdd - Adds a sample to a time series.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAdd(ctx context.Context, key string, timestamp interface{}, value float64) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAddWithArgs - Adds a sample to a time series, with additional options.
+// This function also allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.add/
+func (c cmdable) TSAddWithArgs(ctx context.Context, key string, timestamp interface{}, value float64, options *TSOptions) *IntCmd {
+ args := []interface{}{"TS.ADD", key, timestamp, value}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
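+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client,
+// ctx a context.Context, and a server with the RedisTimeSeries module):
+// "*" lets the server assign the sample timestamp:
+//
+//	ts, err := rdb.TSAddWithArgs(ctx, "temp:room1", "*", 21.5, &redis.TSOptions{
+//		Retention: 86400000, // retention period in milliseconds
+//		Labels:    map[string]string{"room": "1"},
+//	}).Result()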
+
+// TSCreate - Creates a new time-series key.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreate(ctx context.Context, key string) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateWithArgs - Creates a new time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize, Encoding, DuplicatePolicy and Labels.
+// For more information - https://redis.io/commands/ts.create/
+func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATE", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Encoding != "" {
+ args = append(args, "ENCODING", options.Encoding)
+ }
+
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSAlter - Alters an existing time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Retention, ChunkSize and DuplicatePolicy.
+// For more information - https://redis.io/commands/ts.alter/
+func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOptions) *StatusCmd {
+ args := []interface{}{"TS.ALTER", key}
+ if options != nil {
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.DuplicatePolicy != "" {
+ args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy)
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRule - Creates a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRule(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSCreateRuleWithArgs - Creates a compaction rule from sourceKey to destKey with additional option.
+// This function allows for specifying additional option such as:
+// alignTimestamp.
+// For more information - https://redis.io/commands/ts.createrule/
+func (c cmdable) TSCreateRuleWithArgs(ctx context.Context, sourceKey string, destKey string, aggregator Aggregator, bucketDuration int, options *TSCreateRuleOptions) *StatusCmd {
+ args := []interface{}{"TS.CREATERULE", sourceKey, destKey, "AGGREGATION", aggregator.String(), bucketDuration}
+ if options != nil {
+ if options.alignTimestamp != 0 {
+ args = append(args, options.alignTimestamp)
+ }
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrBy - Increments the value of the last sample of a time-series key by
+// the given amount (which, despite its name, is passed via the timestamp parameter).
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.INCRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSIncrByWithArgs - Increments the value of the last sample of a time-series key by the given amount, with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.incrby/
+func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.INCRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrBy - Decrements the value of the last sample of a time-series key by
+// the given amount (which, despite its name, is passed via the timestamp parameter).
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrBy(ctx context.Context, Key string, timestamp float64) *IntCmd {
+ args := []interface{}{"TS.DECRBY", Key, timestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDecrByWithArgs - Decrements the value of the last sample of a time-series key by the given amount, with additional options.
+// This function allows for specifying additional options such as:
+// Timestamp, Retention, ChunkSize, Uncompressed and Labels.
+// For more information - https://redis.io/commands/ts.decrby/
+func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp float64, options *TSIncrDecrOptions) *IntCmd {
+ args := []interface{}{"TS.DECRBY", key, timestamp}
+ if options != nil {
+ if options.Timestamp != 0 {
+ args = append(args, "TIMESTAMP", options.Timestamp)
+ }
+ if options.Retention != 0 {
+ args = append(args, "RETENTION", options.Retention)
+ }
+ if options.ChunkSize != 0 {
+ args = append(args, "CHUNK_SIZE", options.ChunkSize)
+ }
+ if options.Uncompressed {
+ args = append(args, "UNCOMPRESSED")
+ }
+ if options.Labels != nil {
+ args = append(args, "LABELS")
+ for label, value := range options.Labels {
+ args = append(args, label, value)
+ }
+ }
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDel - Deletes a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.del/
+func (c cmdable) TSDel(ctx context.Context, Key string, fromTimestamp int, toTimestamp int) *IntCmd {
+ args := []interface{}{"TS.DEL", Key, fromTimestamp, toTimestamp}
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSDeleteRule - Deletes a compaction rule from sourceKey to destKey.
+// For more information - https://redis.io/commands/ts.deleterule/
+func (c cmdable) TSDeleteRule(ctx context.Context, sourceKey string, destKey string) *StatusCmd {
+ args := []interface{}{"TS.DELETERULE", sourceKey, destKey}
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGetWithArgs - Gets the last sample of a time-series key with additional option.
+// This function allows for specifying additional option such as:
+// Latest.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGetWithArgs(ctx context.Context, key string, options *TSGetOptions) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ }
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSGet - Gets the last sample of a time-series key.
+// For more information - https://redis.io/commands/ts.get/
+func (c cmdable) TSGet(ctx context.Context, key string) *TSTimestampValueCmd {
+ args := []interface{}{"TS.GET", key}
+ cmd := newTSTimestampValueCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type TSTimestampValue struct {
+ Timestamp int64
+ Value float64
+}
+type TSTimestampValueCmd struct {
+ baseCmd
+ val TSTimestampValue
+}
+
+func newTSTimestampValueCmd(ctx context.Context, args ...interface{}) *TSTimestampValueCmd {
+ return &TSTimestampValueCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueCmd) SetVal(val TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueCmd) Result() (TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueCmd) Val() TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadMapLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = TSTimestampValue{}
+ for i := 0; i < n; i++ {
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val.Timestamp = timestamp
+ cmd.val.Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSInfo - Returns information about a time-series key.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfo(ctx context.Context, key string) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSInfoWithArgs - Returns information about a time-series key with additional option.
+// This function allows for specifying additional option such as:
+// Debug.
+// For more information - https://redis.io/commands/ts.info/
+func (c cmdable) TSInfoWithArgs(ctx context.Context, key string, options *TSInfoOptions) *MapStringInterfaceCmd {
+ args := []interface{}{"TS.INFO", key}
+ if options != nil {
+ if options.Debug {
+ args = append(args, "DEBUG")
+ }
+ }
+ cmd := NewMapStringInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMAdd - Adds multiple samples to multiple time-series keys.
+// It accepts a slice of 'ktv' slices, each containing exactly three elements: key, timestamp, and value.
+// This struct must be provided for this command to work.
+// For more information - https://redis.io/commands/ts.madd/
+func (c cmdable) TSMAdd(ctx context.Context, ktvSlices [][]interface{}) *IntSliceCmd {
+ args := []interface{}{"TS.MADD"}
+ for _, ktv := range ktvSlices {
+ args = append(args, ktv...)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
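+
+// Example (illustrative sketch; assumes rdb is a configured *redis.Client,
+// ctx a context.Context, and hypothetical keys/timestamps): each inner slice
+// is {key, timestamp, value}:
+//
+//	tss, err := rdb.TSMAdd(ctx, [][]interface{}{
+//		{"temp:room1", 1609459200000, 21.5},
+//		{"temp:room2", 1609459200000, 19.0},
+//	}).Result()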
+
+// TSQueryIndex - Returns all the keys matching the filter expression.
+// For more information - https://redis.io/commands/ts.queryindex/
+func (c cmdable) TSQueryIndex(ctx context.Context, filterExpr []string) *StringSliceCmd {
+ args := []interface{}{"TS.QUERYINDEX"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRange - Returns a range of samples from a time-series key in reverse order.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRevRangeWithArgs - Returns a range of samples from a time-series key in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.revrange/
+func (c cmdable) TSRevRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRevRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.REVRANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRange - Returns a range of samples from a time-series key.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRange(ctx context.Context, key string, fromTimestamp int, toTimestamp int) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSRangeWithArgs - Returns a range of samples from a time-series key with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, Count, Align, Aggregator,
+// BucketDuration, BucketTimestamp and Empty.
+// For more information - https://redis.io/commands/ts.range/
+func (c cmdable) TSRangeWithArgs(ctx context.Context, key string, fromTimestamp int, toTimestamp int, options *TSRangeOptions) *TSTimestampValueSliceCmd {
+ args := []interface{}{"TS.RANGE", key, fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ cmd := newTSTimestampValueSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
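One detail worth noting in both range builders above: BucketDuration is appended bare, directly after the AGGREGATION pair, so it only forms a valid clause when Aggregator is also set. A sketch of a downsampling query under that constraint (key, timestamps, and bucket size are illustrative; redis.Avg is assumed to be one of the Aggregator constants defined earlier in this file):

package main

import (
	"context"

	"github.com/redis/go-redis/v9"
)

func minuteAverages(ctx context.Context, rdb *redis.Client) ([]redis.TSTimestampValue, error) {
	opts := &redis.TSRangeOptions{
		// Emits "AGGREGATION avg 60000"; BucketDuration completes the
		// AGGREGATION clause and is meaningless without Aggregator.
		Aggregator:     redis.Avg,
		BucketDuration: 60000, // milliseconds
	}
	return rdb.TSRangeWithArgs(ctx, "sensor:1", 1700000000000, 1700003600000, opts).Result()
}

TSRevRangeWithArgs takes the same options and returns the samples newest-first.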
+type TSTimestampValueSliceCmd struct {
+ baseCmd
+ val []TSTimestampValue
+}
+
+func newTSTimestampValueSliceCmd(ctx context.Context, args ...interface{}) *TSTimestampValueSliceCmd {
+ return &TSTimestampValueSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TSTimestampValueSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TSTimestampValueSliceCmd) SetVal(val []TSTimestampValue) {
+ cmd.val = val
+}
+
+func (cmd *TSTimestampValueSliceCmd) Result() ([]TSTimestampValue, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TSTimestampValueSliceCmd) Val() []TSTimestampValue {
+ return cmd.val
+}
+
+func (cmd *TSTimestampValueSliceCmd) readReply(rd *proto.Reader) (err error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ cmd.val = make([]TSTimestampValue, n)
+ for i := 0; i < n; i++ {
+ _, _ = rd.ReadArrayLen()
+ timestamp, err := rd.ReadInt()
+ if err != nil {
+ return err
+ }
+ value, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+ cmd.val[i].Timestamp = timestamp
+ cmd.val[i].Value, err = strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TSMRange - Returns a range of samples from multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRangeWithArgs - Returns a range of samples from multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrange/
+func (c cmdable) TSMRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
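As the builder above shows, GROUPBY and REDUCE are emitted after the FILTER clause, matching the TS.MRANGE grammar. A sketch of a grouped query (label name, reducer, and timestamps are illustrative):

package main

import (
	"context"

	"github.com/redis/go-redis/v9"
)

func maxPerRegion(ctx context.Context, rdb *redis.Client) (map[string][]interface{}, error) {
	opts := &redis.TSMRangeOptions{
		// One reduced series per distinct "region" label value.
		GroupByLabel: "region",
		Reducer:      "max",
	}
	return rdb.TSMRangeWithArgs(ctx, 1700000000000, 1700003600000, []string{"type=temperature"}, opts).Result()
}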
+// TSMRevRange - Returns a range of samples from multiple time-series keys in reverse order.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRange(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp, "FILTER"}
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMRevRangeWithArgs - Returns a range of samples from multiple time-series keys in reverse order with additional options.
+// This function allows for specifying additional options such as:
+// Latest, FilterByTS, FilterByValue, WithLabels, SelectedLabels,
+// Count, Align, Aggregator, BucketDuration, BucketTimestamp,
+// Empty, GroupByLabel and Reducer.
+// For more information - https://redis.io/commands/ts.mrevrange/
+func (c cmdable) TSMRevRangeWithArgs(ctx context.Context, fromTimestamp int, toTimestamp int, filterExpr []string, options *TSMRevRangeOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MREVRANGE", fromTimestamp, toTimestamp}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.FilterByTS != nil {
+ args = append(args, "FILTER_BY_TS")
+ for _, f := range options.FilterByTS {
+ args = append(args, f)
+ }
+ }
+ if options.FilterByValue != nil {
+ args = append(args, "FILTER_BY_VALUE")
+ for _, f := range options.FilterByValue {
+ args = append(args, f)
+ }
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ if options.Count != 0 {
+ args = append(args, "COUNT", options.Count)
+ }
+ if options.Align != nil {
+ args = append(args, "ALIGN", options.Align)
+ }
+ if options.Aggregator != 0 {
+ args = append(args, "AGGREGATION", options.Aggregator.String())
+ }
+ if options.BucketDuration != 0 {
+ args = append(args, options.BucketDuration)
+ }
+ if options.BucketTimestamp != nil {
+ args = append(args, "BUCKETTIMESTAMP", options.BucketTimestamp)
+ }
+ if options.Empty {
+ args = append(args, "EMPTY")
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filterExpr {
+ args = append(args, f)
+ }
+ if options != nil {
+ if options.GroupByLabel != nil {
+ args = append(args, "GROUPBY", options.GroupByLabel)
+ }
+ if options.Reducer != nil {
+ args = append(args, "REDUCE", options.Reducer)
+ }
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGet - Returns the last sample of multiple time-series keys.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGet(ctx context.Context, filters []string) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET", "FILTER"}
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TSMGetWithArgs - Returns the last sample of multiple time-series keys with additional options.
+// This function allows for specifying additional options such as:
+// Latest, WithLabels and SelectedLabels.
+// For more information - https://redis.io/commands/ts.mget/
+func (c cmdable) TSMGetWithArgs(ctx context.Context, filters []string, options *TSMGetOptions) *MapStringSliceInterfaceCmd {
+ args := []interface{}{"TS.MGET"}
+ if options != nil {
+ if options.Latest {
+ args = append(args, "LATEST")
+ }
+ if options.WithLabels {
+ args = append(args, "WITHLABELS")
+ }
+ if options.SelectedLabels != nil {
+ args = append(args, "SELECTED_LABELS")
+ args = append(args, options.SelectedLabels...)
+ }
+ }
+ args = append(args, "FILTER")
+ for _, f := range filters {
+ args = append(args, f)
+ }
+ cmd := NewMapStringSliceInterfaceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
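A closing sketch for the TS.MGET pair: fetching the latest sample of every matching series, with labels attached (the filter expression is illustrative):

package main

import (
	"context"

	"github.com/redis/go-redis/v9"
)

func latestReadings(ctx context.Context, rdb *redis.Client) (map[string][]interface{}, error) {
	// LATEST also reports the still-open bucket of compacted series.
	opts := &redis.TSMGetOptions{Latest: true, WithLabels: true}
	return rdb.TSMGetWithArgs(ctx, []string{"type=temperature"}, opts).Result()
}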
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/redis/go-redis/v9/tx.go
similarity index 79%
rename from vendor/github.com/go-redis/redis/v8/tx.go
rename to vendor/github.com/redis/go-redis/v9/tx.go
index 8c9d87202a..039eaf3516 100644
--- a/vendor/github.com/go-redis/redis/v8/tx.go
+++ b/vendor/github.com/redis/go-redis/v9/tx.go
@@ -3,8 +3,8 @@ package redis
import (
"context"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/redis/go-redis/v9/internal/pool"
+ "github.com/redis/go-redis/v9/internal/proto"
)
// TxFailedErr transaction redis failed.
@@ -19,18 +19,16 @@ type Tx struct {
baseClient
cmdable
statefulCmdable
- hooks
- ctx context.Context
+ hooksMixin
}
-func (c *Client) newTx(ctx context.Context) *Tx {
+func (c *Client) newTx() *Tx {
tx := Tx{
baseClient: baseClient{
opt: c.opt,
connPool: pool.NewStickyConnPool(c.connPool),
},
- hooks: c.hooks.clone(),
- ctx: ctx,
+ hooksMixin: c.hooksMixin.clone(),
}
tx.init()
return &tx
@@ -39,25 +37,19 @@ func (c *Client) newTx(ctx context.Context) *Tx {
func (c *Tx) init() {
c.cmdable = c.Process
c.statefulCmdable = c.Process
-}
-
-func (c *Tx) Context() context.Context {
- return c.ctx
-}
-func (c *Tx) WithContext(ctx context.Context) *Tx {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.init()
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
+ c.initHooks(hooks{
+ dial: c.baseClient.dial,
+ process: c.baseClient.process,
+ pipeline: c.baseClient.processPipeline,
+ txPipeline: c.baseClient.processTxPipeline,
+ })
}
func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
+ err := c.processHook(ctx, cmd)
+ cmd.SetErr(err)
+ return err
}
// Watch prepares a transaction and marks the keys to be watched
@@ -65,7 +57,7 @@ func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- tx := c.newTx(ctx)
+ tx := c.newTx()
defer tx.Close(ctx)
if len(keys) > 0 {
if err := tx.Watch(ctx, keys...).Err(); err != nil {
@@ -109,9 +101,8 @@ func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
func (c *Tx) Pipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+ return c.processPipelineHook(ctx, cmds)
},
}
pipe.init()
@@ -139,11 +130,22 @@ func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder
// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
func (c *Tx) TxPipeline() Pipeliner {
pipe := Pipeline{
- ctx: c.ctx,
exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+ cmds = wrapMultiExec(ctx, cmds)
+ return c.processTxPipelineHook(ctx, cmds)
},
}
pipe.init()
return &pipe
}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmdsCopy := make([]Cmder, len(cmds)+2)
+ cmdsCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdsCopy[1:], cmds)
+ cmdsCopy[len(cmdsCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdsCopy
+}
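For context, a minimal sketch of the optimistic-locking pattern these hooks implement: Watch binds fn to a single sticky connection, and the TxPipelined call is wrapped in MULTI/EXEC by wrapMultiExec above (the key name is illustrative):

package main

import (
	"context"
	"strconv"

	"github.com/redis/go-redis/v9"
)

func incrWithWatch(ctx context.Context, rdb *redis.Client, key string) error {
	return rdb.Watch(ctx, func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}
		// EXEC fails with TxFailedErr if the watched key changed,
		// at which point the caller would typically retry.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, strconv.Itoa(n+1), 0)
			return nil
		})
		return err
	}, key)
}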
diff --git a/vendor/github.com/redis/go-redis/v9/universal.go b/vendor/github.com/redis/go-redis/v9/universal.go
new file mode 100644
index 0000000000..275bef3d60
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/universal.go
@@ -0,0 +1,247 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "time"
+)
+
+// UniversalOptions information is required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+ // Either a single address or a seed list of host:port addresses
+ // of cluster/sentinel nodes.
+ Addrs []string
+
+ // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
+ ClientName string
+
+ // Database to be selected after connecting to the server.
+ // Only single-node and failover clients.
+ DB int
+
+ // Common options.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Protocol int
+ Username string
+ Password string
+ SentinelUsername string
+ SentinelPassword string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ ContextTimeoutEnabled bool
+
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
+ PoolSize int
+ PoolTimeout time.Duration
+ MinIdleConns int
+ MaxIdleConns int
+ MaxActiveConns int
+ ConnMaxIdleTime time.Duration
+ ConnMaxLifetime time.Duration
+
+ TLSConfig *tls.Config
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ ReadOnly bool
+ RouteByLatency bool
+ RouteRandomly bool
+
+ // The sentinel master name.
+ // Only failover clients.
+
+ MasterName string
+
+ DisableIndentity bool
+ IdentitySuffix string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:6379"}
+ }
+
+ return &ClusterOptions{
+ Addrs: o.Addrs,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRedirects: o.MaxRedirects,
+ ReadOnly: o.ReadOnly,
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ }
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:26379"}
+ }
+
+ return &FailoverOptions{
+ SentinelAddrs: o.Addrs,
+ MasterName: o.MasterName,
+ ClientName: o.ClientName,
+
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+ SentinelUsername: o.SentinelUsername,
+ SentinelPassword: o.SentinelPassword,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ }
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+ addr := "127.0.0.1:6379"
+ if len(o.Addrs) > 0 {
+ addr = o.Addrs[0]
+ }
+
+ return &Options{
+ Addr: addr,
+ ClientName: o.ClientName,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Protocol: o.Protocol,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ ContextTimeoutEnabled: o.ContextTimeoutEnabled,
+
+ PoolFIFO: o.PoolFIFO,
+ PoolSize: o.PoolSize,
+ PoolTimeout: o.PoolTimeout,
+ MinIdleConns: o.MinIdleConns,
+ MaxIdleConns: o.MaxIdleConns,
+ MaxActiveConns: o.MaxActiveConns,
+ ConnMaxIdleTime: o.ConnMaxIdleTime,
+ ConnMaxLifetime: o.ConnMaxLifetime,
+
+ TLSConfig: o.TLSConfig,
+
+ DisableIndentity: o.DisableIndentity,
+ IdentitySuffix: o.IdentitySuffix,
+ }
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which - based on the provided options -
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally or having different
+// clients in different environments.
+type UniversalClient interface {
+ Cmdable
+ AddHook(Hook)
+ Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Subscribe(ctx context.Context, channels ...string) *PubSub
+ PSubscribe(ctx context.Context, channels ...string) *PubSub
+ SSubscribe(ctx context.Context, channels ...string) *PubSub
+ Close() error
+ PoolStats() *PoolStats
+}
+
+var (
+ _ UniversalClient = (*Client)(nil)
+ _ UniversalClient = (*ClusterClient)(nil)
+ _ UniversalClient = (*Ring)(nil)
+)
+
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
+//
+// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+ if opts.MasterName != "" {
+ return NewFailoverClient(opts.Failover())
+ } else if len(opts.Addrs) > 1 {
+ return NewClusterClient(opts.Cluster())
+ }
+ return NewClient(opts.Simple())
+}
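A sketch of the three dispatch paths described above (addresses and the master name are illustrative):

package main

import "github.com/redis/go-redis/v9"

func newClients() {
	// One address, no MasterName: a single-node *redis.Client.
	_ = redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"},
	})

	// MasterName set: a sentinel-backed failover client.
	_ = redis.NewUniversalClient(&redis.UniversalOptions{
		MasterName: "mymaster",
		Addrs:      []string{"localhost:26379"},
	})

	// Two or more addresses: a *redis.ClusterClient.
	_ = redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"node1:6379", "node2:6379", "node3:6379"},
	})
}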
diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go
new file mode 100644
index 0000000000..e2c7f3e718
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/version.go
@@ -0,0 +1,6 @@
+package redis
+
+// Version is the current release version.
+func Version() string {
+ return "9.5.1"
+}
diff --git a/vendor/github.com/streadway/amqp/.gitignore b/vendor/github.com/streadway/amqp/.gitignore
deleted file mode 100644
index 667fb50c57..0000000000
--- a/vendor/github.com/streadway/amqp/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-certs/*
-spec/spec
-examples/simple-consumer/simple-consumer
-examples/simple-producer/simple-producer
-
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-.idea/**/contentModel.xml
diff --git a/vendor/github.com/streadway/amqp/.travis.yml b/vendor/github.com/streadway/amqp/.travis.yml
deleted file mode 100644
index 7eee262b4b..0000000000
--- a/vendor/github.com/streadway/amqp/.travis.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-language: go
-
-go:
- - 1.10.x
- - 1.11.x
- - 1.12.x
- - 1.13.x
-
-addons:
- apt:
- packages:
- - rabbitmq-server
-
-services:
- - rabbitmq
-
-env:
- - GO111MODULE=on AMQP_URL=amqp://guest:guest@127.0.0.1:5672/
-
-before_install:
- - go get -v golang.org/x/lint/golint
-
-script:
- - ./pre-commit
- - go test -cpu=1,2 -v -tags integration ./...
diff --git a/vendor/github.com/streadway/amqp/CONTRIBUTING.md b/vendor/github.com/streadway/amqp/CONTRIBUTING.md
deleted file mode 100644
index c87f3d7e0f..0000000000
--- a/vendor/github.com/streadway/amqp/CONTRIBUTING.md
+++ /dev/null
@@ -1,35 +0,0 @@
-## Prequisites
-
-1. Go: [https://golang.org/dl/](https://golang.org/dl/)
-1. Golint `go get -u -v github.com/golang/lint/golint`
-
-## Contributing
-
-The workflow is pretty standard:
-
-1. Fork github.com/streadway/amqp
-1. Add the pre-commit hook: `ln -s ../../pre-commit .git/hooks/pre-commit`
-1. Create your feature branch (`git checkout -b my-new-feature`)
-1. Run integration tests (see below)
-1. **Implement tests**
-1. Implement fixs
-1. Commit your changes (`git commit -am 'Add some feature'`)
-1. Push to a branch (`git push -u origin my-new-feature`)
-1. Submit a pull request
-
-## Running Tests
-
-The test suite assumes that:
-
- * A RabbitMQ node is running on localhost with all defaults: [https://www.rabbitmq.com/download.html](https://www.rabbitmq.com/download.html)
- * `AMQP_URL` is exported to `amqp://guest:guest@127.0.0.1:5672/`
-
-### Integration Tests
-
-After starting a local RabbitMQ, run integration tests with the following:
-
- env AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ go test -v -cpu 2 -tags integration -race
-
-All integration tests should use the `integrationConnection(...)` test
-helpers defined in `integration_test.go` to setup the integration environment
-and logging.
diff --git a/vendor/github.com/streadway/amqp/LICENSE b/vendor/github.com/streadway/amqp/LICENSE
deleted file mode 100644
index 07b89680a7..0000000000
--- a/vendor/github.com/streadway/amqp/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-Copyright (c) 2012-2019, Sean Treadway, SoundCloud Ltd.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this
-list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this
-list of conditions and the following disclaimer in the documentation and/or
-other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/streadway/amqp/README.md b/vendor/github.com/streadway/amqp/README.md
deleted file mode 100644
index fd179dc296..0000000000
--- a/vendor/github.com/streadway/amqp/README.md
+++ /dev/null
@@ -1,100 +0,0 @@
-[![Build Status](https://api.travis-ci.org/streadway/amqp.svg)](http://travis-ci.org/streadway/amqp) [![GoDoc](https://godoc.org/github.com/streadway/amqp?status.svg)](http://godoc.org/github.com/streadway/amqp)
-
-# Go RabbitMQ Client Library (Unmaintained Fork)
-
-## Beware of Abandonware
-
-This repository is **NOT ACTIVELY MAINTAINED**. Consider using
-a different fork instead: [rabbitmq/amqp091-go](https://github.com/rabbitmq/amqp091-go).
-In case of questions, start a discussion in that repo or [use other RabbitMQ community resources](https://rabbitmq.com/contact.html).
-
-
-
-## Project Maturity
-
-This project has been used in production systems for many years. As of 2022,
-this repository is **NOT ACTIVELY MAINTAINED**.
-
-This repository is **very strict** about any potential public API changes.
-You may want to consider [rabbitmq/amqp091-go](https://github.com/rabbitmq/amqp091-go) which
-is more willing to adapt the API.
-
-
-## Supported Go Versions
-
-This library supports two most recent Go release series, currently 1.10 and 1.11.
-
-
-## Supported RabbitMQ Versions
-
-This project supports RabbitMQ versions starting with `2.0` but primarily tested
-against reasonably recent `3.x` releases. Some features and behaviours may be
-server version-specific.
-
-## Goals
-
-Provide a functional interface that closely represents the AMQP 0.9.1 model
-targeted to RabbitMQ as a server. This includes the minimum necessary to
-interact the semantics of the protocol.
-
-## Non-goals
-
-Things not intended to be supported.
-
- * Auto reconnect and re-synchronization of client and server topologies.
- * Reconnection would require understanding the error paths when the
- topology cannot be declared on reconnect. This would require a new set
- of types and code paths that are best suited at the call-site of this
- package. AMQP has a dynamic topology that needs all peers to agree. If
- this doesn't happen, the behavior is undefined. Instead of producing a
- possible interface with undefined behavior, this package is designed to
- be simple for the caller to implement the necessary connection-time
- topology declaration so that reconnection is trivial and encapsulated in
- the caller's application code.
- * AMQP Protocol negotiation for forward or backward compatibility.
- * 0.9.1 is stable and widely deployed. Versions 0.10 and 1.0 are divergent
- specifications that change the semantics and wire format of the protocol.
- We will accept patches for other protocol support but have no plans for
- implementation ourselves.
- * Anything other than PLAIN and EXTERNAL authentication mechanisms.
- * Keeping the mechanisms interface modular makes it possible to extend
- outside of this package. If other mechanisms prove to be popular, then
- we would accept patches to include them in this package.
-
-## Usage
-
-See the 'examples' subdirectory for simple producers and consumers executables.
-If you have a use-case in mind which isn't well-represented by the examples,
-please file an issue.
-
-## Documentation
-
-Use [Godoc documentation](http://godoc.org/github.com/streadway/amqp) for
-reference and usage.
-
-[RabbitMQ tutorials in
-Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) are also
-available.
-
-## Contributing
-
-Pull requests are very much welcomed. Create your pull request on a non-master
-branch, make sure a test or example is included that covers your change and
-your commits represent coherent changes that include a reason for the change.
-
-To run the integration tests, make sure you have RabbitMQ running on any host,
-export the environment variable `AMQP_URL=amqp://host/` and run `go test -tags
-integration`. TravisCI will also run the integration tests.
-
-Thanks to the [community of contributors](https://github.com/streadway/amqp/graphs/contributors).
-
-## External packages
-
- * [Google App Engine Dialer support](https://github.com/soundtrackyourbrand/gaeamqp)
- * [RabbitMQ examples in Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go)
-
-## License
-
-BSD 2 clause - see LICENSE for more details.
-
-
diff --git a/vendor/github.com/streadway/amqp/allocator.go b/vendor/github.com/streadway/amqp/allocator.go
deleted file mode 100644
index 53620e7d0c..0000000000
--- a/vendor/github.com/streadway/amqp/allocator.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package amqp
-
-import (
- "bytes"
- "fmt"
- "math/big"
-)
-
-const (
- free = 0
- allocated = 1
-)
-
-// allocator maintains a bitset of allocated numbers.
-type allocator struct {
- pool *big.Int
- last int
- low int
- high int
-}
-
-// NewAllocator reserves and frees integers out of a range between low and
-// high.
-//
-// O(N) worst case space used, where N is maximum allocated, divided by
-// sizeof(big.Word)
-func newAllocator(low, high int) *allocator {
- return &allocator{
- pool: big.NewInt(0),
- last: low,
- low: low,
- high: high,
- }
-}
-
-// String returns a string describing the contents of the allocator like
-// "allocator[low..high] reserved..until"
-//
-// O(N) where N is high-low
-func (a allocator) String() string {
- b := &bytes.Buffer{}
- fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high)
-
- for low := a.low; low <= a.high; low++ {
- high := low
- for a.reserved(high) && high <= a.high {
- high++
- }
-
- if high > low+1 {
- fmt.Fprintf(b, " %d..%d", low, high-1)
- } else if high > low {
- fmt.Fprintf(b, " %d", high-1)
- }
-
- low = high
- }
- return b.String()
-}
-
-// Next reserves and returns the next available number out of the range between
-// low and high. If no number is available, false is returned.
-//
-// O(N) worst case runtime where N is allocated, but usually O(1) due to a
-// rolling index into the oldest allocation.
-func (a *allocator) next() (int, bool) {
- wrapped := a.last
-
- // Find trailing bit
- for ; a.last <= a.high; a.last++ {
- if a.reserve(a.last) {
- return a.last, true
- }
- }
-
- // Find preceding free'd pool
- a.last = a.low
-
- for ; a.last < wrapped; a.last++ {
- if a.reserve(a.last) {
- return a.last, true
- }
- }
-
- return 0, false
-}
-
-// reserve claims the bit if it is not already claimed, returning true if
-// successfully claimed.
-func (a *allocator) reserve(n int) bool {
- if a.reserved(n) {
- return false
- }
- a.pool.SetBit(a.pool, n-a.low, allocated)
- return true
-}
-
-// reserved returns true if the integer has been allocated
-func (a *allocator) reserved(n int) bool {
- return a.pool.Bit(n-a.low) == allocated
-}
-
-// release frees the use of the number for another allocation
-func (a *allocator) release(n int) {
- a.pool.SetBit(a.pool, n-a.low, free)
-}
diff --git a/vendor/github.com/streadway/amqp/auth.go b/vendor/github.com/streadway/amqp/auth.go
deleted file mode 100644
index 435c94b12e..0000000000
--- a/vendor/github.com/streadway/amqp/auth.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-
-package amqp
-
-import (
- "fmt"
-)
-
-// Authentication interface provides a means for different SASL authentication
-// mechanisms to be used during connection tuning.
-type Authentication interface {
- Mechanism() string
- Response() string
-}
-
-// PlainAuth is a similar to Basic Auth in HTTP.
-type PlainAuth struct {
- Username string
- Password string
-}
-
-// Mechanism returns "PLAIN"
-func (auth *PlainAuth) Mechanism() string {
- return "PLAIN"
-}
-
-// Response returns the null character delimited encoding for the SASL PLAIN Mechanism.
-func (auth *PlainAuth) Response() string {
- return fmt.Sprintf("\000%s\000%s", auth.Username, auth.Password)
-}
-
-// AMQPlainAuth is similar to PlainAuth
-type AMQPlainAuth struct {
- Username string
- Password string
-}
-
-// Mechanism returns "AMQPLAIN"
-func (auth *AMQPlainAuth) Mechanism() string {
- return "AMQPLAIN"
-}
-
-// Response returns the null character delimited encoding for the SASL PLAIN Mechanism.
-func (auth *AMQPlainAuth) Response() string {
- return fmt.Sprintf("LOGIN:%sPASSWORD:%s", auth.Username, auth.Password)
-}
-
-// Finds the first mechanism preferred by the client that the server supports.
-func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) {
- for _, auth = range client {
- for _, mech := range serverMechanisms {
- if auth.Mechanism() == mech {
- return auth, true
- }
- }
- }
-
- return
-}
diff --git a/vendor/github.com/streadway/amqp/confirms.go b/vendor/github.com/streadway/amqp/confirms.go
deleted file mode 100644
index 06cbaa7110..0000000000
--- a/vendor/github.com/streadway/amqp/confirms.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package amqp
-
-import "sync"
-
-// confirms resequences and notifies one or multiple publisher confirmation listeners
-type confirms struct {
- m sync.Mutex
- listeners []chan Confirmation
- sequencer map[uint64]Confirmation
- published uint64
- expecting uint64
-}
-
-// newConfirms allocates a confirms
-func newConfirms() *confirms {
- return &confirms{
- sequencer: map[uint64]Confirmation{},
- published: 0,
- expecting: 1,
- }
-}
-
-func (c *confirms) Listen(l chan Confirmation) {
- c.m.Lock()
- defer c.m.Unlock()
-
- c.listeners = append(c.listeners, l)
-}
-
-// publish increments the publishing counter
-func (c *confirms) Publish() uint64 {
- c.m.Lock()
- defer c.m.Unlock()
-
- c.published++
- return c.published
-}
-
-// confirm confirms one publishing, increments the expecting delivery tag, and
-// removes bookkeeping for that delivery tag.
-func (c *confirms) confirm(confirmation Confirmation) {
- delete(c.sequencer, c.expecting)
- c.expecting++
- for _, l := range c.listeners {
- l <- confirmation
- }
-}
-
-// resequence confirms any out of order delivered confirmations
-func (c *confirms) resequence() {
- for c.expecting <= c.published {
- sequenced, found := c.sequencer[c.expecting]
- if !found {
- return
- }
- c.confirm(sequenced)
- }
-}
-
-// one confirms one publishing and all following in the publishing sequence
-func (c *confirms) One(confirmed Confirmation) {
- c.m.Lock()
- defer c.m.Unlock()
-
- if c.expecting == confirmed.DeliveryTag {
- c.confirm(confirmed)
- } else {
- c.sequencer[confirmed.DeliveryTag] = confirmed
- }
- c.resequence()
-}
-
-// multiple confirms all publishings up until the delivery tag
-func (c *confirms) Multiple(confirmed Confirmation) {
- c.m.Lock()
- defer c.m.Unlock()
-
- for c.expecting <= confirmed.DeliveryTag {
- c.confirm(Confirmation{c.expecting, confirmed.Ack})
- }
- c.resequence()
-}
-
-// Close closes all listeners, discarding any out of sequence confirmations
-func (c *confirms) Close() error {
- c.m.Lock()
- defer c.m.Unlock()
-
- for _, l := range c.listeners {
- close(l)
- }
- c.listeners = nil
- return nil
-}
diff --git a/vendor/github.com/streadway/amqp/connection.go b/vendor/github.com/streadway/amqp/connection.go
deleted file mode 100644
index 252852e80f..0000000000
--- a/vendor/github.com/streadway/amqp/connection.go
+++ /dev/null
@@ -1,852 +0,0 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-
-package amqp
-
-import (
- "bufio"
- "crypto/tls"
- "io"
- "net"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-)
-
-const (
- maxChannelMax = (2 << 15) - 1
-
- defaultHeartbeat = 10 * time.Second
- defaultConnectionTimeout = 30 * time.Second
- defaultProduct = "https://github.com/streadway/amqp"
- defaultVersion = "β"
- // Safer default that makes channel leaks a lot easier to spot
- // before they create operational headaches. See https://github.com/rabbitmq/rabbitmq-server/issues/1593.
- defaultChannelMax = (2 << 10) - 1
- defaultLocale = "en_US"
-)
-
-// Config is used in DialConfig and Open to specify the desired tuning
-// parameters used during a connection open handshake. The negotiated tuning
-// will be stored in the returned connection's Config field.
-type Config struct {
- // The SASL mechanisms to try in the client request, and the successful
- // mechanism used on the Connection object.
- // If SASL is nil, PlainAuth from the URL is used.
- SASL []Authentication
-
- // Vhost specifies the namespace of permissions, exchanges, queues and
- // bindings on the server. Dial sets this to the path parsed from the URL.
- Vhost string
-
- ChannelMax int // 0 max channels means 2^16 - 1
- FrameSize int // 0 max bytes means unlimited
- Heartbeat time.Duration // less than 1s uses the server's interval
-
- // TLSClientConfig specifies the client configuration of the TLS connection
- // when establishing a tls transport.
- // If the URL uses an amqps scheme, then an empty tls.Config with the
- // ServerName from the URL is used.
- TLSClientConfig *tls.Config
-
- // Properties is table of properties that the client advertises to the server.
- // This is an optional setting - if the application does not set this,
- // the underlying library will use a generic set of client properties.
- Properties Table
-
- // Connection locale that we expect to always be en_US
- // Even though servers must return it as per the AMQP 0-9-1 spec,
- // we are not aware of it being used other than to satisfy the spec requirements
- Locale string
-
- // Dial returns a net.Conn prepared for a TLS handshake with TSLClientConfig,
- // then an AMQP connection handshake.
- // If Dial is nil, net.DialTimeout with a 30s connection and 30s deadline is
- // used during TLS and AMQP handshaking.
- Dial func(network, addr string) (net.Conn, error)
-}
-
-// Connection manages the serialization and deserialization of frames from IO
-// and dispatches the frames to the appropriate channel. All RPC methods and
-// asynchronous Publishing, Delivery, Ack, Nack and Return messages are
-// multiplexed on this channel. There must always be active receivers for
-// every asynchronous message on this connection.
-type Connection struct {
- destructor sync.Once // shutdown once
- sendM sync.Mutex // conn writer mutex
- m sync.Mutex // struct field mutex
-
- conn io.ReadWriteCloser
-
- rpc chan message
- writer *writer
- sends chan time.Time // timestamps of each frame sent
- deadlines chan readDeadliner // heartbeater updates read deadlines
-
- allocator *allocator // id generator valid after openTune
- channels map[uint16]*Channel
-
- noNotify bool // true when we will never notify again
- closes []chan *Error
- blocks []chan Blocking
-
- errors chan *Error
-
- Config Config // The negotiated Config after connection.open
-
- Major int // Server's major version
- Minor int // Server's minor version
- Properties Table // Server properties
- Locales []string // Server locales
-
- closed int32 // Will be 1 if the connection is closed, 0 otherwise. Should only be accessed as atomic
-}
-
-type readDeadliner interface {
- SetReadDeadline(time.Time) error
-}
-
-// DefaultDial establishes a connection when config.Dial is not provided
-func DefaultDial(connectionTimeout time.Duration) func(network, addr string) (net.Conn, error) {
- return func(network, addr string) (net.Conn, error) {
- conn, err := net.DialTimeout(network, addr, connectionTimeout)
- if err != nil {
- return nil, err
- }
-
- // Heartbeating hasn't started yet, don't stall forever on a dead server.
- // A deadline is set for TLS and AMQP handshaking. After AMQP is established,
- // the deadline is cleared in openComplete.
- if err := conn.SetDeadline(time.Now().Add(connectionTimeout)); err != nil {
- return nil, err
- }
-
- return conn, nil
- }
-}
-
-// Dial accepts a string in the AMQP URI format and returns a new Connection
-// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
-// seconds and sets the handshake deadline to 30 seconds. After handshake,
-// deadlines are cleared.
-//
-// Dial uses the zero value of tls.Config when it encounters an amqps://
-// scheme. It is equivalent to calling DialTLS(amqp, nil).
-func Dial(url string) (*Connection, error) {
- return DialConfig(url, Config{
- Heartbeat: defaultHeartbeat,
- Locale: defaultLocale,
- })
-}
-
-// DialTLS accepts a string in the AMQP URI format and returns a new Connection
-// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10
-// seconds and sets the initial read deadline to 30 seconds.
-//
-// DialTLS uses the provided tls.Config when encountering an amqps:// scheme.
-func DialTLS(url string, amqps *tls.Config) (*Connection, error) {
- return DialConfig(url, Config{
- Heartbeat: defaultHeartbeat,
- TLSClientConfig: amqps,
- Locale: defaultLocale,
- })
-}
-
-// DialConfig accepts a string in the AMQP URI format and a configuration for
-// the transport and connection setup, returning a new Connection. Defaults to
-// a server heartbeat interval of 10 seconds and sets the initial read deadline
-// to 30 seconds.
-func DialConfig(url string, config Config) (*Connection, error) {
- var err error
- var conn net.Conn
-
- uri, err := ParseURI(url)
- if err != nil {
- return nil, err
- }
-
- if config.SASL == nil {
- config.SASL = []Authentication{uri.PlainAuth()}
- }
-
- if config.Vhost == "" {
- config.Vhost = uri.Vhost
- }
-
- addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10))
-
- dialer := config.Dial
- if dialer == nil {
- dialer = DefaultDial(defaultConnectionTimeout)
- }
-
- conn, err = dialer("tcp", addr)
- if err != nil {
- return nil, err
- }
-
- if uri.Scheme == "amqps" {
- if config.TLSClientConfig == nil {
- config.TLSClientConfig = new(tls.Config)
- }
-
- // If ServerName has not been specified in TLSClientConfig,
- // set it to the URI host used for this connection.
- if config.TLSClientConfig.ServerName == "" {
- config.TLSClientConfig.ServerName = uri.Host
- }
-
- client := tls.Client(conn, config.TLSClientConfig)
- if err := client.Handshake(); err != nil {
-
- conn.Close()
- return nil, err
- }
-
- conn = client
- }
-
- return Open(conn, config)
-}
-
-/*
-Open accepts an already established connection, or other io.ReadWriteCloser as
-a transport. Use this method if you have established a TLS connection or wish
-to use your own custom transport.
-
-*/
-func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) {
- c := &Connection{
- conn: conn,
- writer: &writer{bufio.NewWriter(conn)},
- channels: make(map[uint16]*Channel),
- rpc: make(chan message),
- sends: make(chan time.Time),
- errors: make(chan *Error, 1),
- deadlines: make(chan readDeadliner, 1),
- }
- go c.reader(conn)
- return c, c.open(config)
-}
-
-/*
-LocalAddr returns the local TCP peer address, or ":0" (the zero value of net.TCPAddr)
-as a fallback default value if the underlying transport does not support LocalAddr().
-*/
-func (c *Connection) LocalAddr() net.Addr {
- if conn, ok := c.conn.(interface {
- LocalAddr() net.Addr
- }); ok {
- return conn.LocalAddr()
- }
- return &net.TCPAddr{}
-}
-
-// ConnectionState returns basic TLS details of the underlying transport.
-// Returns a zero value when the underlying connection does not implement
-// ConnectionState() tls.ConnectionState.
-func (c *Connection) ConnectionState() tls.ConnectionState {
- if conn, ok := c.conn.(interface {
- ConnectionState() tls.ConnectionState
- }); ok {
- return conn.ConnectionState()
- }
- return tls.ConnectionState{}
-}
-
-/*
-NotifyClose registers a listener for close events either initiated by an error
-accompanying a connection.close method or by a normal shutdown.
-
-On normal shutdowns, the chan will be closed.
-
-To reconnect after a transport or protocol error, register a listener here and
-re-run your setup process.
-
-*/
-func (c *Connection) NotifyClose(receiver chan *Error) chan *Error {
- c.m.Lock()
- defer c.m.Unlock()
-
- if c.noNotify {
- close(receiver)
- } else {
- c.closes = append(c.closes, receiver)
- }
-
- return receiver
-}
-
-/*
-NotifyBlocked registers a listener for RabbitMQ specific TCP flow control
-method extensions connection.blocked and connection.unblocked. Flow control is
-active with a reason when Blocking.Blocked is true. When a Connection is
-blocked, all methods will block across all connections until server resources
-become free again.
-
-This optional extension is supported by the server when the
-"connection.blocked" server capability key is true.
-
-*/
-func (c *Connection) NotifyBlocked(receiver chan Blocking) chan Blocking {
- c.m.Lock()
- defer c.m.Unlock()
-
- if c.noNotify {
- close(receiver)
- } else {
- c.blocks = append(c.blocks, receiver)
- }
-
- return receiver
-}
-
-/*
-Close requests and waits for the response to close the AMQP connection.
-
-It's advisable to use this message when publishing to ensure all kernel buffers
-have been flushed on the server and client before exiting.
-
-An error indicates that server may not have received this request to close but
-the connection should be treated as closed regardless.
-
-After returning from this call, all resources associated with this connection,
-including the underlying io, Channels, Notify listeners and Channel consumers
-will also be closed.
-*/
-func (c *Connection) Close() error {
- if c.IsClosed() {
- return ErrClosed
- }
-
- defer c.shutdown(nil)
- return c.call(
- &connectionClose{
- ReplyCode: replySuccess,
- ReplyText: "kthxbai",
- },
- &connectionCloseOk{},
- )
-}
-
-func (c *Connection) closeWith(err *Error) error {
- if c.IsClosed() {
- return ErrClosed
- }
-
- defer c.shutdown(err)
- return c.call(
- &connectionClose{
- ReplyCode: uint16(err.Code),
- ReplyText: err.Reason,
- },
- &connectionCloseOk{},
- )
-}
-
-// IsClosed returns true if the connection is marked as closed, otherwise false
-// is returned.
-func (c *Connection) IsClosed() bool {
- return (atomic.LoadInt32(&c.closed) == 1)
-}
-
-func (c *Connection) send(f frame) error {
- if c.IsClosed() {
- return ErrClosed
- }
-
- c.sendM.Lock()
- err := c.writer.WriteFrame(f)
- c.sendM.Unlock()
-
- if err != nil {
- // shutdown could be re-entrant from signaling notify chans
- go c.shutdown(&Error{
- Code: FrameError,
- Reason: err.Error(),
- })
- } else {
- // Broadcast we sent a frame, reducing heartbeats, only
- // if there is something that can receive - like a non-reentrant
- // call or if the heartbeater isn't running
- select {
- case c.sends <- time.Now():
- default:
- }
- }
-
- return err
-}
-
-func (c *Connection) shutdown(err *Error) {
- atomic.StoreInt32(&c.closed, 1)
-
- c.destructor.Do(func() {
- c.m.Lock()
- defer c.m.Unlock()
-
- if err != nil {
- for _, c := range c.closes {
- c <- err
- }
- }
-
- if err != nil {
- c.errors <- err
- }
- // Shutdown handler goroutine can still receive the result.
- close(c.errors)
-
- for _, c := range c.closes {
- close(c)
- }
-
- for _, c := range c.blocks {
- close(c)
- }
-
- // Shutdown the channel, but do not use closeChannel() as it calls
- // releaseChannel() which requires the connection lock.
- //
- // Ranging over c.channels and calling releaseChannel() that mutates
- // c.channels is racy - see commit 6063341 for an example.
- for _, ch := range c.channels {
- ch.shutdown(err)
- }
-
- c.conn.Close()
-
- c.channels = map[uint16]*Channel{}
- c.allocator = newAllocator(1, c.Config.ChannelMax)
- c.noNotify = true
- })
-}
-
-// All methods sent to the connection channel should be synchronous so we
-// can handle them directly without a framing component
-func (c *Connection) demux(f frame) {
- if f.channel() == 0 {
- c.dispatch0(f)
- } else {
- c.dispatchN(f)
- }
-}
-
-func (c *Connection) dispatch0(f frame) {
- switch mf := f.(type) {
- case *methodFrame:
- switch m := mf.Method.(type) {
- case *connectionClose:
- // Send immediately as shutdown will close our side of the writer.
- c.send(&methodFrame{
- ChannelId: 0,
- Method: &connectionCloseOk{},
- })
-
- c.shutdown(newError(m.ReplyCode, m.ReplyText))
- case *connectionBlocked:
- for _, c := range c.blocks {
- c <- Blocking{Active: true, Reason: m.Reason}
- }
- case *connectionUnblocked:
- for _, c := range c.blocks {
- c <- Blocking{Active: false}
- }
- default:
- c.rpc <- m
- }
- case *heartbeatFrame:
- // kthx - all reads reset our deadline. so we can drop this
- default:
- // lolwat - channel0 only responds to methods and heartbeats
- c.closeWith(ErrUnexpectedFrame)
- }
-}
-
-func (c *Connection) dispatchN(f frame) {
- c.m.Lock()
- channel := c.channels[f.channel()]
- c.m.Unlock()
-
- if channel != nil {
- channel.recv(channel, f)
- } else {
- c.dispatchClosed(f)
- }
-}
-
-// section 2.3.7: "When a peer decides to close a channel or connection, it
-// sends a Close method. The receiving peer MUST respond to a Close with a
-// Close-Ok, and then both parties can close their channel or connection. Note
-// that if peers ignore Close, deadlock can happen when both peers send Close
-// at the same time."
-//
-// When we don't have a channel, so we must respond with close-ok on a close
-// method. This can happen between a channel exception on an asynchronous
-// method like basic.publish and a synchronous close with channel.close.
-// In that case, we'll get both a channel.close and channel.close-ok in any
-// order.
-func (c *Connection) dispatchClosed(f frame) {
- // Only consider method frames, drop content/header frames
- if mf, ok := f.(*methodFrame); ok {
- switch mf.Method.(type) {
- case *channelClose:
- c.send(&methodFrame{
- ChannelId: f.channel(),
- Method: &channelCloseOk{},
- })
- case *channelCloseOk:
- // we are already closed, so do nothing
- default:
- // unexpected method on closed channel
- c.closeWith(ErrClosed)
- }
- }
-}
-
-// Reads each frame off the IO and hand off to the connection object that
-// will demux the streams and dispatch to one of the opened channels or
-// handle on channel 0 (the connection channel).
-func (c *Connection) reader(r io.Reader) {
- buf := bufio.NewReader(r)
- frames := &reader{buf}
- conn, haveDeadliner := r.(readDeadliner)
-
- for {
- frame, err := frames.ReadFrame()
-
- if err != nil {
- c.shutdown(&Error{Code: FrameError, Reason: err.Error()})
- return
- }
-
- c.demux(frame)
-
- if haveDeadliner {
- select {
- case c.deadlines <- conn:
- default:
- // On c.Close() c.heartbeater() might exit just before c.deadlines <- conn is called.
- // Which results in this goroutine being stuck forever.
- }
- }
- }
-}
-
-// Ensures that at least one frame is being sent at the tuned interval with a
-// jitter tolerance of 1s
-func (c *Connection) heartbeater(interval time.Duration, done chan *Error) {
- const maxServerHeartbeatsInFlight = 3
-
- var sendTicks <-chan time.Time
- if interval > 0 {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
- sendTicks = ticker.C
- }
-
- lastSent := time.Now()
-
- for {
- select {
- case at, stillSending := <-c.sends:
- // When actively sending, depend on sent frames to reset server timer
- if stillSending {
- lastSent = at
- } else {
- return
- }
-
- case at := <-sendTicks:
- // When idle, fill the space with a heartbeat frame
- if at.Sub(lastSent) > interval-time.Second {
- if err := c.send(&heartbeatFrame{}); err != nil {
- // send heartbeats even after close/closeOk so we
- // tick until the connection starts erroring
- return
- }
- }
-
- case conn := <-c.deadlines:
- // When reading, reset our side of the deadline, if we've negotiated one with
- // a deadline that covers at least 2 server heartbeats
- if interval > 0 {
- conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval))
- }
-
- case <-done:
- return
- }
- }
-}
-
-// Convenience method to inspect the Connection.Properties["capabilities"]
-// Table for server identified capabilities like "basic.ack" or
-// "confirm.select".
-func (c *Connection) isCapable(featureName string) bool {
- capabilities, _ := c.Properties["capabilities"].(Table)
- hasFeature, _ := capabilities[featureName].(bool)
- return hasFeature
-}
-
-// allocateChannel records but does not open a new channel with a unique id.
-// This method is the initial part of the channel lifecycle and paired with
-// releaseChannel
-func (c *Connection) allocateChannel() (*Channel, error) {
- c.m.Lock()
- defer c.m.Unlock()
-
- if c.IsClosed() {
- return nil, ErrClosed
- }
-
- id, ok := c.allocator.next()
- if !ok {
- return nil, ErrChannelMax
- }
-
- ch := newChannel(c, uint16(id))
- c.channels[uint16(id)] = ch
-
- return ch, nil
-}
-
-// releaseChannel removes a channel from the registry as the final part of the
-// channel lifecycle
-func (c *Connection) releaseChannel(id uint16) {
- c.m.Lock()
- defer c.m.Unlock()
-
- delete(c.channels, id)
- c.allocator.release(int(id))
-}
-
-// openChannel allocates and opens a channel, must be paired with closeChannel
-func (c *Connection) openChannel() (*Channel, error) {
- ch, err := c.allocateChannel()
- if err != nil {
- return nil, err
- }
-
- if err := ch.open(); err != nil {
- c.releaseChannel(ch.id)
- return nil, err
- }
- return ch, nil
-}
-
-// closeChannel releases and initiates a shutdown of the channel. All channel
-// closures should be initiated here for proper channel lifecycle management on
-// this connection.
-func (c *Connection) closeChannel(ch *Channel, e *Error) {
- ch.shutdown(e)
- c.releaseChannel(ch.id)
-}
-
-/*
-Channel opens a unique, concurrent server channel to process the bulk of AMQP
-messages. Any error from methods on this receiver will render the receiver
-invalid and a new Channel should be opened.
-
-*/
-func (c *Connection) Channel() (*Channel, error) {
- return c.openChannel()
-}
-
-func (c *Connection) call(req message, res ...message) error {
- // Special case for when the protocol header frame is sent insted of a
- // request method
- if req != nil {
- if err := c.send(&methodFrame{ChannelId: 0, Method: req}); err != nil {
- return err
- }
- }
-
- select {
- case err, ok := <-c.errors:
- if !ok {
- return ErrClosed
- }
- return err
-
- case msg := <-c.rpc:
- // Try to match one of the result types
- for _, try := range res {
- if reflect.TypeOf(msg) == reflect.TypeOf(try) {
- // *res = *msg
- vres := reflect.ValueOf(try).Elem()
- vmsg := reflect.ValueOf(msg).Elem()
- vres.Set(vmsg)
- return nil
- }
- }
- return ErrCommandInvalid
- }
- // unreachable
-}
-
-// Connection = open-Connection *use-Connection close-Connection
-// open-Connection = C:protocol-header
-// S:START C:START-OK
-// *challenge
-// S:TUNE C:TUNE-OK
-// C:OPEN S:OPEN-OK
-// challenge = S:SECURE C:SECURE-OK
-// use-Connection = *channel
-// close-Connection = C:CLOSE S:CLOSE-OK
-// / S:CLOSE C:CLOSE-OK
-func (c *Connection) open(config Config) error {
- if err := c.send(&protocolHeader{}); err != nil {
- return err
- }
-
- return c.openStart(config)
-}
-
-func (c *Connection) openStart(config Config) error {
- start := &connectionStart{}
-
- if err := c.call(nil, start); err != nil {
- return err
- }
-
- c.Major = int(start.VersionMajor)
- c.Minor = int(start.VersionMinor)
- c.Properties = Table(start.ServerProperties)
- c.Locales = strings.Split(start.Locales, " ")
-
- // eventually support challenge/response here by also responding to
- // connectionSecure.
- auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " "))
- if !ok {
- return ErrSASL
- }
-
- // Save this mechanism off as the one we chose
- c.Config.SASL = []Authentication{auth}
-
- // Set the connection locale to client locale
- c.Config.Locale = config.Locale
-
- return c.openTune(config, auth)
-}
-
-func (c *Connection) openTune(config Config, auth Authentication) error {
- if len(config.Properties) == 0 {
- config.Properties = Table{
- "product": defaultProduct,
- "version": defaultVersion,
- }
- }
-
- config.Properties["capabilities"] = Table{
- "connection.blocked": true,
- "consumer_cancel_notify": true,
- }
-
- ok := &connectionStartOk{
- ClientProperties: config.Properties,
- Mechanism: auth.Mechanism(),
- Response: auth.Response(),
- Locale: config.Locale,
- }
- tune := &connectionTune{}
-
- if err := c.call(ok, tune); err != nil {
- // Per the spec, a connection can only be closed after it has been
- // opened, so at this point we know the socket was closed due to an
- // authentication failure. Return a meaningful error.
- return ErrCredentials
- }
-
- // When the server and client both use default 0, then the max channel is
- // only limited by uint16.
- c.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax))
- if c.Config.ChannelMax == 0 {
- c.Config.ChannelMax = defaultChannelMax
- }
- c.Config.ChannelMax = min(c.Config.ChannelMax, maxChannelMax)
-
- // Frame size includes the headers and end byte (len(payload)+8). Even if
- // this is less than FrameMinSize, use what the server sends, because the
- // alternative is to abort the handshake here.
- c.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax))
-
- // Save this off for resetDeadline()
- c.Config.Heartbeat = time.Second * time.Duration(pick(
- int(config.Heartbeat/time.Second),
- int(tune.Heartbeat)))
-
- // "The client should start sending heartbeats after receiving a
- // Connection.Tune method"
- go c.heartbeater(c.Config.Heartbeat, c.NotifyClose(make(chan *Error, 1)))
-
- if err := c.send(&methodFrame{
- ChannelId: 0,
- Method: &connectionTuneOk{
- ChannelMax: uint16(c.Config.ChannelMax),
- FrameMax: uint32(c.Config.FrameSize),
- Heartbeat: uint16(c.Config.Heartbeat / time.Second),
- },
- }); err != nil {
- return err
- }
-
- return c.openVhost(config)
-}
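-
-// For illustration (values hypothetical), a client influences the tuning
-// negotiated above through Config before dialing:
-//
-//	config := Config{
-//		ChannelMax: 64,
-//		FrameSize:  128 * 1024,
-//		Heartbeat:  10 * time.Second,
-//	}
-//	conn, err := DialConfig("amqp://guest:guest@localhost:5672/", config)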
-
-func (c *Connection) openVhost(config Config) error {
- req := &connectionOpen{VirtualHost: config.Vhost}
- res := &connectionOpenOk{}
-
- if err := c.call(req, res); err != nil {
- // Cannot be closed yet, but we know it's a vhost problem
- return ErrVhost
- }
-
- c.Config.Vhost = config.Vhost
-
- return c.openComplete()
-}
-
-// openComplete performs any final Connection initialization dependent on the
-// connection handshake and clears any state needed for TLS and AMQP handshaking.
-func (c *Connection) openComplete() error {
- // We clear the deadlines and let the heartbeater reset the read deadline if requested.
- // RabbitMQ uses TCP flow control at this point for pushback, so Writes can
- // intentionally block.
- if deadliner, ok := c.conn.(interface {
- SetDeadline(time.Time) error
- }); ok {
- _ = deadliner.SetDeadline(time.Time{})
- }
-
- c.allocator = newAllocator(1, c.Config.ChannelMax)
- return nil
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func pick(client, server int) int {
- if client == 0 || server == 0 {
- return max(client, server)
- }
- return min(client, server)
-}
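-
-// Illustrative results of the negotiation helper above:
-//
-//	pick(0, 2047)    == 2047 // client has no preference, take the server's
-//	pick(1024, 0)    == 1024 // server imposes no limit, take the client's
-//	pick(1024, 2047) == 1024 // both set, take the smaller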
diff --git a/vendor/github.com/streadway/amqp/consumers.go b/vendor/github.com/streadway/amqp/consumers.go
deleted file mode 100644
index 887ac7494e..0000000000
--- a/vendor/github.com/streadway/amqp/consumers.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-
-package amqp
-
-import (
- "os"
- "strconv"
- "sync"
- "sync/atomic"
-)
-
-var consumerSeq uint64
-
-const consumerTagLengthMax = 0xFF // see writeShortstr
-
-func uniqueConsumerTag() string {
- return commandNameBasedUniqueConsumerTag(os.Args[0])
-}
-
-func commandNameBasedUniqueConsumerTag(commandName string) string {
- tagPrefix := "ctag-"
- tagInfix := commandName
- tagSuffix := "-" + strconv.FormatUint(atomic.AddUint64(&consumerSeq, 1), 10)
-
- if len(tagPrefix)+len(tagInfix)+len(tagSuffix) > consumerTagLengthMax {
- tagInfix = "streadway/amqp"
- }
-
- return tagPrefix + tagInfix + tagSuffix
-}
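-
-// For example (illustrative), a process started as "/usr/bin/worker" yields
-// tags such as "ctag-/usr/bin/worker-1", "ctag-/usr/bin/worker-2", and so on.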
-
-type consumerBuffers map[string]chan *Delivery
-
-// Concurrent type that manages the consumerTag ->
-// ingress consumerBuffer mapping
-type consumers struct {
- sync.WaitGroup // one for buffer
- closed chan struct{} // signal buffer
-
- sync.Mutex // protects below
- chans consumerBuffers
-}
-
-func makeConsumers() *consumers {
- return &consumers{
- closed: make(chan struct{}),
- chans: make(consumerBuffers),
- }
-}
-
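-// buffer pumps deliveries from in to out, queueing them in memory so that a
-// slow consumer never blocks the connection's frame reader. It exits once in
-// is closed and drained, or immediately when subs.closed fires.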
-func (subs *consumers) buffer(in chan *Delivery, out chan Delivery) {
- defer close(out)
- defer subs.Done()
-
- var inflight = in
- var queue []*Delivery
-
- for delivery := range in {
- queue = append(queue, delivery)
-
- for len(queue) > 0 {
- select {
- case <-subs.closed:
- // closed before drained, drop in-flight
- return
-
- case delivery, consuming := <-inflight:
- if consuming {
- queue = append(queue, delivery)
- } else {
- inflight = nil
- }
-
- case out <- *queue[0]:
- queue = queue[1:]
- }
- }
- }
-}
-
-// On key conflict, close the previous channel.
-func (subs *consumers) add(tag string, consumer chan Delivery) {
- subs.Lock()
- defer subs.Unlock()
-
- if prev, found := subs.chans[tag]; found {
- close(prev)
- }
-
- in := make(chan *Delivery)
- subs.chans[tag] = in
-
- subs.Add(1)
- go subs.buffer(in, consumer)
-}
-
-func (subs *consumers) cancel(tag string) (found bool) {
- subs.Lock()
- defer subs.Unlock()
-
- ch, found := subs.chans[tag]
-
- if found {
- delete(subs.chans, tag)
- close(ch)
- }
-
- return found
-}
-
-func (subs *consumers) close() {
- subs.Lock()
- defer subs.Unlock()
-
- close(subs.closed)
-
- for tag, ch := range subs.chans {
- delete(subs.chans, tag)
- close(ch)
- }
-
- subs.Wait()
-}
-
-// Sends a delivery to the consumer identified by `tag`.
-// If unbuffered channels are used for Consume, this method
-// could block all deliveries until the consumer
-// receives on the other end of the channel.
-func (subs *consumers) send(tag string, msg *Delivery) bool {
- subs.Lock()
- defer subs.Unlock()
-
- buffer, found := subs.chans[tag]
- if found {
- buffer <- msg
- }
-
- return found
-}
diff --git a/vendor/github.com/streadway/amqp/doc.go b/vendor/github.com/streadway/amqp/doc.go
deleted file mode 100644
index ee69c5b382..0000000000
--- a/vendor/github.com/streadway/amqp/doc.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-
-/*
-Package amqp is an AMQP 0.9.1 client with RabbitMQ extensions
-
-Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much
-of the terminology in this library directly relates to AMQP concepts.
-
- Resources
-
- http://www.rabbitmq.com/tutorials/amqp-concepts.html
- http://www.rabbitmq.com/getstarted.html
- http://www.rabbitmq.com/amqp-0-9-1-reference.html
-
-Design
-
-Most other broker clients publish to queues, but in AMQP, clients publish to
-Exchanges instead. AMQP is programmable, meaning that both the producers and
-consumers agree on the configuration of the broker, instead of requiring an
-operator or system configuration that declares the logical topology in the
-broker. The routing between producers and consumer queues is via Bindings.
-These bindings form the logical topology of the broker.
-
-In this library, a message sent from a publisher is called a "Publishing" and
-a message received by a consumer is called a "Delivery". The fields of
-Publishings and Deliveries are close but not exact mappings to the underlying
-wire format to maintain stronger types. Many other libraries will combine
-message properties with message headers. In this library, the well-known
-message properties are strongly typed fields on the Publishings and
-Deliveries, whereas the user-defined headers are in the Headers field.
-
-The method naming closely matches the protocol's method name with positional
-parameters mapping to named protocol message fields. The motivation here is to
-present a comprehensive view over all possible interactions with the server.
-
-Generally, methods that map to protocol methods of the "basic" class will be
-elided in this interface, and "select" methods of various channel mode
-selectors will be elided, for example Channel.Confirm and Channel.Tx.
-
-The library is intentionally designed to be synchronous, where responses for
-each protocol message are required to be received in an RPC manner. Some
-methods have a noWait parameter like Channel.QueueDeclare, and some methods are
-asynchronous like Channel.Publish. The error values should still be checked for
-these methods as they will indicate IO failures like when the underlying
-connection closes.
-
-Asynchronous Events
-
-Clients of this library may be interested in receiving some of the protocol
-messages other than Deliveries, like basic.ack methods while a channel is in
-confirm mode.
-
-The Notify* methods with Connection and Channel receivers model the pattern of
-asynchronous events like closes due to exceptions, or messages that are sent out
-of band from an RPC call like basic.ack or basic.flow.
-
-Any asynchronous events, including Deliveries and Publishings, must always
-have a receiver until the corresponding chans are closed. Without
-asynchronous receivers, the synchronous methods will block.
-
-Use Case
-
-It's important as a client of an AMQP topology to ensure the state of the
-broker matches your expectations. For both publish and consume use cases,
-make sure you declare the queues, exchanges and bindings you expect to exist
-prior to calling Channel.Publish or Channel.Consume.
-
- // Connections start with amqp.Dial() typically from a command line argument
- // or environment variable.
- connection, err := amqp.Dial(os.Getenv("AMQP_URL"))
-
- // To cleanly shutdown by flushing kernel buffers, make sure to close and
- // wait for the response.
- defer connection.Close()
-
- // Most operations happen on a channel. If any error is returned on a
- // channel, the channel will no longer be valid; throw it away and try with
- // a different channel. If you use many channels, it's useful for the
- // server to
- channel, err := connection.Channel()
-
- // Declare your topology here. If it doesn't exist, it will be created;
- // if it already exists and is not what you expect, then that's considered
- // an error.
-
- // Use your connection on this topology with either Publish or Consume, or
- // inspect your queues with QueueInspect. It's unwise to mix Publish and
- // Consume on the same connection; keeping them separate lets TCP do its job well.
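-
- // A hedged sketch of the declare-then-consume flow described above; the
- // queue name and option values are illustrative:
- queue, err := channel.QueueDeclare("work", true, false, false, false, nil)
- deliveries, err := channel.Consume(queue.Name, "", false, false, false, false, nil)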
-
-SSL/TLS - Secure connections
-
-When Dial encounters an amqps:// scheme, it will use the zero value of a
-tls.Config. This will only perform server certificate and host verification.
-
-Use DialTLS when you wish to provide a client certificate (recommended),
-include a private certificate authority's certificate in the cert chain for
-server validity, or run insecure by not verifying the server certificate;
-alternatively, dial your own connection. DialTLS will use the provided
-tls.Config when it encounters an amqps:// scheme and will dial a plain
-connection when it encounters an amqp:// scheme.
-
-SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html
-
-*/
-package amqp
diff --git a/vendor/github.com/streadway/amqp/fuzz.go b/vendor/github.com/streadway/amqp/fuzz.go
deleted file mode 100644
index 16e626ce75..0000000000
--- a/vendor/github.com/streadway/amqp/fuzz.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build gofuzz
-
-package amqp
-
-import "bytes"
-
-func Fuzz(data []byte) int {
- r := reader{bytes.NewReader(data)}
- frame, err := r.ReadFrame()
- if err != nil {
- if frame != nil {
- panic("frame is not nil")
- }
- return 0
- }
- return 1
-}
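-
-// A hedged note, not part of the original file: this harness targets
-// dvyukov/go-fuzz; a typical run (paths illustrative) is go-fuzz-build
-// followed by go-fuzz -bin=./amqp-fuzz.zip -workdir=fuzz.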
diff --git a/vendor/github.com/streadway/amqp/pre-commit b/vendor/github.com/streadway/amqp/pre-commit
deleted file mode 100644
index 3715530073..0000000000
--- a/vendor/github.com/streadway/amqp/pre-commit
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/sh
-
-LATEST_STABLE_SUPPORTED_GO_VERSION="1.11"
-
-main() {
- if local_go_version_is_latest_stable
- then
- run_gofmt
- run_golint
- run_govet
- fi
- run_unit_tests
-}
-
-local_go_version_is_latest_stable() {
- go version | grep -q $LATEST_STABLE_SUPPORTED_GO_VERSION
-}
-
-log_error() {
- echo "$*" 1>&2
-}
-
-run_gofmt() {
- GOFMT_FILES=$(gofmt -l .)
- if [ -n "$GOFMT_FILES" ]
- then
- log_error "gofmt failed for the following files:
-$GOFMT_FILES
-
-please run 'gofmt -w .' on your changes before committing."
- exit 1
- fi
-}
-
-run_golint() {
- GOLINT_ERRORS=$(golint ./... | grep -v "Id should be")
- if [ -n "$GOLINT_ERRORS" ]
- then
- log_error "golint failed for the following reasons:
-$GOLINT_ERRORS
-
-please run 'golint ./...' on your changes before committing."
- exit 1
- fi
-}
-
-run_govet() {
- GOVET_ERRORS=$(go tool vet ./*.go 2>&1)
- if [ -n "$GOVET_ERRORS" ]
- then
- log_error "go vet failed for the following reasons:
-$GOVET_ERRORS
-
-please run 'go tool vet ./*.go' on your changes before committing."
- exit 1
- fi
-}
-
-run_unit_tests() {
- if [ -z "$NOTEST" ]
- then
- log_error 'Running short tests...'
- env AMQP_URL= go test -short
- fi
-}
-
-main
diff --git a/vendor/github.com/streadway/amqp/types.go b/vendor/github.com/streadway/amqp/types.go
deleted file mode 100644
index 83bd92f977..0000000000
--- a/vendor/github.com/streadway/amqp/types.go
+++ /dev/null
@@ -1,428 +0,0 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-
-package amqp
-
-import (
- "fmt"
- "io"
- "time"
-)
-
-// Constants for standard AMQP 0-9-1 exchange types.
-const (
- ExchangeDirect = "direct"
- ExchangeFanout = "fanout"
- ExchangeTopic = "topic"
- ExchangeHeaders = "headers"
-)
-
-var (
- // ErrClosed is returned when the channel or connection is not open
- ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"}
-
- // ErrChannelMax is returned when Connection.Channel has been called enough
- // times that all channel IDs have been exhausted in the client or the
- // server.
- ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"}
-
- // ErrSASL is returned from Dial when the authentication mechanism could not
- // be negotiated.
- ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"}
-
- // ErrCredentials is returned when the authenticated client is not authorized
- // to any vhost.
- ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"}
-
- // ErrVhost is returned when the authenticated user is not permitted to
- // access the requested Vhost.
- ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"}
-
- // ErrSyntax is a hard protocol error, indicating an unsupported protocol,
- // implementation or encoding.
- ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"}
-
- // ErrFrame is returned when the protocol frame cannot be read from the
- // server, indicating an unsupported protocol or unsupported frame type.
- ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"}
-
- // ErrCommandInvalid is returned when the server sends an unexpected response
- // to this requested message type. This indicates a bug in this client.
- ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"}
-
- // ErrUnexpectedFrame is returned when something other than a method or
- // heartbeat frame is delivered to the Connection, indicating a bug in the
- // client.
- ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"}
-
- // ErrFieldType is returned when writing a message containing a Go type unsupported by AMQP.
- ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"}
-)
-
-// Error captures the code and reason a channel or connection has been closed
-// by the server.
-type Error struct {
- Code int // constant code from the specification
- Reason string // description of the error
- Server bool // true when initiated from the server, false when from this library
- Recover bool // true when this error can be recovered by retrying later or with different parameters
-}
-
-func newError(code uint16, text string) *Error {
- return &Error{
- Code: int(code),
- Reason: text,
- Recover: isSoftExceptionCode(int(code)),
- Server: true,
- }
-}
-
-func (e Error) Error() string {
- return fmt.Sprintf("Exception (%d) Reason: %q", e.Code, e.Reason)
-}
-
-// Used by header frames to capture routing and header information
-type properties struct {
- ContentType string // MIME content type
- ContentEncoding string // MIME content encoding
- Headers Table // Application or header exchange table
- DeliveryMode uint8 // queue implementation use - Transient (1) or Persistent (2)
- Priority uint8 // queue implementation use - 0 to 9
- CorrelationId string // application use - correlation identifier
- ReplyTo string // application use - address to reply to (ex: RPC)
- Expiration string // implementation use - message expiration spec
- MessageId string // application use - message identifier
- Timestamp time.Time // application use - message timestamp
- Type string // application use - message type name
- UserId string // application use - creating user id
- AppId string // application use - creating application
- reserved1 string // was cluster-id - process for buffer consumption
-}
-
-// DeliveryMode. Transient means higher throughput but messages will not be
-// restored on broker restart. The delivery mode of publishings is unrelated
-// to the durability of the queues they reside on. Transient messages will
-// not be restored to durable queues, persistent messages will be restored to
-// durable queues and lost on non-durable queues during server restart.
-//
-// This remains typed as uint8 to match Publishing.DeliveryMode. Other
-// delivery modes specific to custom queue implementations are not enumerated
-// here.
-const (
- Transient uint8 = 1
- Persistent uint8 = 2
-)
-
-// The property flags are an array of bits that indicate the presence or
-// absence of each property value in sequence. The bits are ordered from
-// high to low - bit 15 indicates the first property.
-const (
- flagContentType = 0x8000
- flagContentEncoding = 0x4000
- flagHeaders = 0x2000
- flagDeliveryMode = 0x1000
- flagPriority = 0x0800
- flagCorrelationId = 0x0400
- flagReplyTo = 0x0200
- flagExpiration = 0x0100
- flagMessageId = 0x0080
- flagTimestamp = 0x0040
- flagType = 0x0020
- flagUserId = 0x0010
- flagAppId = 0x0008
- flagReserved1 = 0x0004
-)
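-
-// For illustration, the presence of ContentType in a decoded flags word
-// (a hypothetical variable) would be tested as:
-//
-//	hasContentType := flags&flagContentType != 0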
-
-// Queue captures the current server state of the queue on the server returned
-// from Channel.QueueDeclare or Channel.QueueInspect.
-type Queue struct {
- Name string // server confirmed or generated name
- Messages int // count of messages not awaiting acknowledgment
- Consumers int // number of consumers receiving deliveries
-}
-
-// Publishing captures the client message sent to the server. The fields
-// outside of the Headers table included in this struct mirror the underlying
-// fields in the content frame. They use native types for convenience and
-// efficiency.
-type Publishing struct {
- // Application or exchange specific fields,
- // the headers exchange will inspect this field.
- Headers Table
-
- // Properties
- ContentType string // MIME content type
- ContentEncoding string // MIME content encoding
- DeliveryMode uint8 // Transient (0 or 1) or Persistent (2)
- Priority uint8 // 0 to 9
- CorrelationId string // correlation identifier
- ReplyTo string // address to reply to (ex: RPC)
- Expiration string // message expiration spec
- MessageId string // message identifier
- Timestamp time.Time // message timestamp
- Type string // message type name
- UserId string // creating user id - ex: "guest"
- AppId string // creating application id
-
- // The application specific payload of the message
- Body []byte
-}
-
-// Blocking notifies the server's TCP flow control of the Connection. When a
-// server hits a memory or disk alarm it will block all connections until the
-// resources are reclaimed. Use NotifyBlock on the Connection to receive these
-// events.
-type Blocking struct {
- Active bool // TCP pushback active/inactive on server
- Reason string // Server reason for activation
-}
-
-// Confirmation notifies the acknowledgment or negative acknowledgement of a
-// publishing identified by its delivery tag. Use NotifyPublish on the Channel
-// to consume these events.
-type Confirmation struct {
- DeliveryTag uint64 // A 1 based counter of publishings from when the channel was put in Confirm mode
- Ack bool // True when the server successfully received the publishing
-}
-
-// Decimal matches the AMQP decimal type. Scale is the number of decimal
-// digits, e.g. Scale == 2 and Value == 12345 represent the decimal 123.45.
-type Decimal struct {
- Scale uint8
- Value int32
-}
-
-// Table stores user supplied fields of the following types:
-//
-// bool
-// byte
-// float32
-// float64
-// int
-// int16
-// int32
-// int64
-// nil
-// string
-// time.Time
-// amqp.Decimal
-// amqp.Table
-// []byte
-// []interface{} - containing above types
-//
-// Functions taking a table will immediately fail when the table contains a
-// value of an unsupported type.
-//
-// The caller must be specific in which precision of integer it wishes to
-// encode.
-//
-// Use a type assertion when reading values from a table for type conversion.
-//
-// RabbitMQ expects int32 for integer values.
-//
-type Table map[string]interface{}
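-
-// An illustrative Table literal using the types listed above; note the
-// explicit int32, which RabbitMQ expects for integer values:
-//
-//	headers := Table{
-//		"x-retry-count": int32(3),
-//		"trace-id":      "abc123",
-//	}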
-
-func validateField(f interface{}) error {
- switch fv := f.(type) {
- case nil, bool, byte, int, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time:
- return nil
-
- case []interface{}:
- for _, v := range fv {
- if err := validateField(v); err != nil {
- return fmt.Errorf("in array %s", err)
- }
- }
- return nil
-
- case Table:
- for k, v := range fv {
- if err := validateField(v); err != nil {
- return fmt.Errorf("table field %q %s", k, err)
- }
- }
- return nil
- }
-
- return fmt.Errorf("value %T not supported", f)
-}
-
-// Validate returns an error if any Go types in the table are incompatible with AMQP types.
-func (t Table) Validate() error {
- return validateField(t)
-}
-
-// Heap interface for maintaining delivery tags
-type tagSet []uint64
-
-func (set tagSet) Len() int { return len(set) }
-func (set tagSet) Less(i, j int) bool { return (set)[i] < (set)[j] }
-func (set tagSet) Swap(i, j int) { (set)[i], (set)[j] = (set)[j], (set)[i] }
-func (set *tagSet) Push(tag interface{}) { *set = append(*set, tag.(uint64)) }
-func (set *tagSet) Pop() interface{} {
- val := (*set)[len(*set)-1]
- *set = (*set)[:len(*set)-1]
- return val
-}
-
-type message interface {
- id() (uint16, uint16)
- wait() bool
- read(io.Reader) error
- write(io.Writer) error
-}
-
-type messageWithContent interface {
- message
- getContent() (properties, []byte)
- setContent(properties, []byte)
-}
-
-/*
-The base interface implemented as:
-
-2.3.5 Frame Details
-
-All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects
-malformed frames:
-
- 0 1 3 7 size+7 size+8
- +------+---------+-------------+ +------------+ +-----------+
- | type | channel | size | | payload | | frame-end |
- +------+---------+-------------+ +------------+ +-----------+
- octet short long size octets octet
-
-To read a frame, we:
-
- 1. Read the header and check the frame type and channel.
- 2. Depending on the frame type, we read the payload and process it.
- 3. Read the frame end octet.
-
-In realistic implementations where performance is a concern, we would use
-“read-ahead buffering” or “gathering reads” to avoid doing three separate
-system calls to read a frame.
-
-*/
-type frame interface {
- write(io.Writer) error
- channel() uint16
-}
-
-type reader struct {
- r io.Reader
-}
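-
-// A hedged sketch of the three-step read loop described above; ReadFrame is
-// defined elsewhere in this package and conn is a hypothetical net.Conn:
-//
-//	r := reader{r: conn}
-//	for {
-//		f, err := r.ReadFrame()
-//		if err != nil {
-//			break
-//		}
-//		// dispatch f by f.channel()
-//	}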
-
-type writer struct {
- w io.Writer
-}
-
-// Implements the frame interface for Connection RPC
-type protocolHeader struct{}
-
-func (protocolHeader) write(w io.Writer) error {
- _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1})
- return err
-}
-
-func (protocolHeader) channel() uint16 {
- panic("only valid as initial handshake")
-}
-
-/*
-Method frames carry the high-level protocol commands (which we call "methods").
-One method frame carries one command. The method frame payload has this format:
-
- 0 2 4
- +----------+-----------+-------------- - -
- | class-id | method-id | arguments...
- +----------+-----------+-------------- - -
- short short ...
-
-To process a method frame, we:
- 1. Read the method frame payload.
- 2. Unpack it into a structure. A given method always has the same structure,
-    so we can unpack the method rapidly.
- 3. Check that the method is allowed in the current context.
- 4. Check that the method arguments are valid.
- 5. Execute the method.
-
-Method frame bodies are constructed as a list of AMQP data fields (bits,
-integers, strings and string tables). The marshalling code is trivially
-generated directly from the protocol specifications, and can be very rapid.
-*/
-type methodFrame struct {
- ChannelId uint16
- ClassId uint16
- MethodId uint16
- Method message
-}
-
-func (f *methodFrame) channel() uint16 { return f.ChannelId }
-
-/*
-Heartbeating is a technique designed to undo one of TCP/IP's features, namely
-its ability to recover from a broken physical connection by closing only after
-a quite long time-out. In some scenarios we need to know very rapidly if a
-peer is disconnected or not responding for other reasons (e.g. it is looping).
-Since heartbeating can be done at a low level, we implement this as a special
-type of frame that peers exchange at the transport level, rather than as a
-class method.
-*/
-type heartbeatFrame struct {
- ChannelId uint16
-}
-
-func (f *heartbeatFrame) channel() uint16 { return f.ChannelId }
-
-/*
-Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally
-defined as carrying content. When a peer sends such a method frame, it always
-follows it with a content header and zero or more content body frames.
-
-A content header frame has this format:
-
- 0 2 4 12 14
- +----------+--------+-----------+----------------+------------- - -
- | class-id | weight | body size | property flags | property list...
- +----------+--------+-----------+----------------+------------- - -
- short short long long short remainder...
-
-We place content body in distinct frames (rather than including it in the
-method) so that AMQP may support "zero copy" techniques in which content is
-never marshalled or encoded. We place the content properties in their own
-frame so that recipients can selectively discard contents they do not want to
-process.
-*/
-type headerFrame struct {
- ChannelId uint16
- ClassId uint16
- weight uint16
- Size uint64
- Properties properties
-}
-
-func (f *headerFrame) channel() uint16 { return f.ChannelId }
-
-/*
-Content is the application data we carry from client-to-client via the AMQP
-server. Content is, roughly speaking, a set of properties plus a binary data
-part. The set of allowed properties are defined by the Basic class, and these
-form the "content header frame". The data can be any size, and MAY be broken
-into several (or many) chunks, each forming a "content body frame".
-
-Looking at the frames for a specific channel, as they pass on the wire, we
-might see something like this:
-
- [method]
- [method] [header] [body] [body]
- [method]
- ...
-*/
-type bodyFrame struct {
- ChannelId uint16
- Body []byte
-}
-
-func (f *bodyFrame) channel() uint16 { return f.ChannelId }
diff --git a/vendor/github.com/streadway/amqp/uri.go b/vendor/github.com/streadway/amqp/uri.go
deleted file mode 100644
index e584715497..0000000000
--- a/vendor/github.com/streadway/amqp/uri.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Source code and contact info at http://github.com/streadway/amqp
-
-package amqp
-
-import (
- "errors"
- "net"
- "net/url"
- "strconv"
- "strings"
-)
-
-var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'")
-var errURIWhitespace = errors.New("URI must not contain whitespace")
-
-var schemePorts = map[string]int{
- "amqp": 5672,
- "amqps": 5671,
-}
-
-var defaultURI = URI{
- Scheme: "amqp",
- Host: "localhost",
- Port: 5672,
- Username: "guest",
- Password: "guest",
- Vhost: "/",
-}
-
-// URI represents a parsed AMQP URI string.
-type URI struct {
- Scheme string
- Host string
- Port int
- Username string
- Password string
- Vhost string
-}
-
-// ParseURI attempts to parse the given AMQP URI according to the spec.
-// See http://www.rabbitmq.com/uri-spec.html.
-//
-// Default values for the fields are:
-//
-// Scheme: amqp
-// Host: localhost
-// Port: 5672
-// Username: guest
-// Password: guest
-// Vhost: /
-//
-func ParseURI(uri string) (URI, error) {
- builder := defaultURI
-
- if strings.Contains(uri, " ") {
- return builder, errURIWhitespace
- }
-
- u, err := url.Parse(uri)
- if err != nil {
- return builder, err
- }
-
- defaultPort, okScheme := schemePorts[u.Scheme]
-
- if okScheme {
- builder.Scheme = u.Scheme
- } else {
- return builder, errURIScheme
- }
-
- host := u.Hostname()
- port := u.Port()
-
- if host != "" {
- builder.Host = host
- }
-
- if port != "" {
- port32, err := strconv.ParseInt(port, 10, 32)
- if err != nil {
- return builder, err
- }
- builder.Port = int(port32)
- } else {
- builder.Port = defaultPort
- }
-
- if u.User != nil {
- builder.Username = u.User.Username()
- if password, ok := u.User.Password(); ok {
- builder.Password = password
- }
- }
-
- if u.Path != "" {
- if strings.HasPrefix(u.Path, "/") {
- if u.Host == "" && strings.HasPrefix(u.Path, "///") {
- // net/url doesn't handle local context authorities and leaves that up
- // to the scheme handler. In our case, we translate amqp:/// into the
- // default host and whatever the vhost should be
- if len(u.Path) > 3 {
- builder.Vhost = u.Path[3:]
- }
- } else if len(u.Path) > 1 {
- builder.Vhost = u.Path[1:]
- }
- } else {
- builder.Vhost = u.Path
- }
- }
-
- return builder, nil
-}
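-
-// An illustrative use of ParseURI (values hypothetical):
-//
-//	uri, err := ParseURI("amqp://user:pass@broker:5672/myvhost")
-//	// uri.Host == "broker", uri.Port == 5672, uri.Vhost == "myvhost"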
-
-// PlainAuth returns a PlainAuth structure based on the parsed URI's
-// Username and Password fields.
-func (uri URI) PlainAuth() *PlainAuth {
- return &PlainAuth{
- Username: uri.Username,
- Password: uri.Password,
- }
-}
-
-// AMQPlainAuth returns an AMQPlainAuth structure based on the parsed URI's
-// Username and Password fields.
-func (uri URI) AMQPlainAuth() *AMQPlainAuth {
- return &AMQPlainAuth{
- Username: uri.Username,
- Password: uri.Password,
- }
-}
-
-func (uri URI) String() string {
- authority, err := url.Parse("")
- if err != nil {
- return err.Error()
- }
-
- authority.Scheme = uri.Scheme
-
- if uri.Username != defaultURI.Username || uri.Password != defaultURI.Password {
- authority.User = url.User(uri.Username)
-
- if uri.Password != defaultURI.Password {
- authority.User = url.UserPassword(uri.Username, uri.Password)
- }
- }
-
- if defaultPort, found := schemePorts[uri.Scheme]; !found || defaultPort != uri.Port {
- authority.Host = net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port))
- } else {
- // JoinHostPort() automatically adds brackets to the host if it's
- // an IPv6 address.
- //
- // If no port is specified, JoinHostPort() returns an address in the
- // form of "[::1]:", so we use TrimSuffix() to remove the extra ":".
- authority.Host = strings.TrimSuffix(net.JoinHostPort(uri.Host, ""), ":")
- }
-
- if uri.Vhost != defaultURI.Vhost {
- // Make sure net/url does not double escape, e.g.
- // "%2F" does not become "%252F".
- authority.Path = uri.Vhost
- authority.RawPath = url.QueryEscape(uri.Vhost)
- } else {
- authority.Path = "/"
- }
-
- return authority.String()
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitignore b/vendor/github.com/uber/jaeger-client-go/.gitignore
deleted file mode 100644
index 2734907909..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-*.out
-*.test
-*.xml
-*.swp
-.idea/
-.tmp/
-*.iml
-*.cov
-*.html
-*.log
-gen/thrift/js
-gen/thrift/py
-vendor/
-crossdock-main
-crossdock/jaeger-docker-compose.yml
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitmodules b/vendor/github.com/uber/jaeger-client-go/.gitmodules
deleted file mode 100644
index 295ebcf622..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "idl"]
- path = idl
- url = https://github.com/uber/jaeger-idl.git
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
deleted file mode 100644
index 435aea1d5b..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/.travis.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-sudo: required
-
-language: go
-go_import_path: github.com/uber/jaeger-client-go
-
-dist: trusty
-
-matrix:
- include:
- # - go: 1.15.x
- # env:
- # - TESTS=true
- # - USE_DEP=true
- # - COVERAGE=true
- - go: 1.15.x
- env:
- - USE_DEP=true
- - CROSSDOCK=true
- # - go: 1.15.x
- # env:
- # - TESTS=true
- # - USE_DEP=false
- # - USE_GLIDE=true
- # test with previous version of Go
- # - go: 1.14.x
- # env:
- # - TESTS=true
- # - USE_DEP=true
- # - CI_SKIP_LINT=true
-
-services:
- - docker
-
-env:
- global:
- - DOCKER_COMPOSE_VERSION=1.8.0
- - COMMIT=${TRAVIS_COMMIT::8}
- # DOCKER_PASS
- - secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk="
- # DOCKER_USER
- - secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0="
-
-install:
- - make install-ci USE_DEP=$USE_DEP
- - if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi
-
-script:
- - if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi
- - if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi
-
-after_success:
- - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi
- - if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
-
-after_failure:
- - if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
deleted file mode 100644
index 964a4049c0..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ /dev/null
@@ -1,396 +0,0 @@
-Changes by Version
-==================
-
-2.30.0 (2021-12-07)
--------------------
-- Add deprecation notice -- Yuri Shkuro
-- Use public struct for tracer options to document initialization better (#605) -- Yuri Shkuro
-- Remove redundant newline in NewReporter init message (#603) -- wwade
-- [zipkin] Encode span IDs as full 16-hex strings #601 -- Nathan
-- [docs] Replace godoc.org with pkg.go.dev (#591) -- Aaron Jheng
-- Remove outdated reference to Zipkin model. -- Yuri Shkuro
-- Move thrift compilation to a script (#590) -- Aaron Jheng
-- Document JAEGER_TRACEID_128BIT env var -- Yuri Shkuro
-
-2.29.1 (2021-05-24)
--------------------
-- Remove dependency on "testing" in "thrift" (#586) -- @yurishkuro
-
-
-2.29.0 (2021-05-20)
--------------------
-- Update vendored thrift to 0.14.1 (#584) -- @nhatthm
-
-
-2.28.0 (2021-04-30)
--------------------
-- HTTPSamplingStrategyFetcher: Use http client with 10 second timeout (#578) -- Joe Elliott
-
-
-2.27.0 (2021-04-19)
--------------------
-- Don't override HTTP Reporter batch size to 1; default to 100, user can override (#571) -- R. Aidan Campbell
-
-
-2.26.0 (2021-04-16)
--------------------
-- Delete a baggage item when value is blank (#562) -- evan.kim
-- Trim baggage key when parsing (#566) -- sicong.huang
-- feat: extend configuration to support custom randomNumber func (#555) -- NemoO_o
-- Support JAEGER_TRACEID_128BIT env var (#547) -- Yuri Shkuro
-- Additional context protections (#544) -- Joe Elliott
-- Lock RemotelyControlledSampler.sampler on callbacks (#543) -- Dima
-- Upgrade build to Go 1.15 (#539) -- Yuri Shkuro
-- Upgrade to jaeger-lib@2.3.0 to fix broken codahale/hdrhistogram dependency (#537) -- Yuri Shkuro
-- Prefix TraceID/SpanID.String() with zeroes (#533) -- Lukas Vogel
-- Upgrade to OpenTracing Go 1.2 (#525) -- Yuri Shkuro
-
-
-2.25.0 (2020-07-13)
--------------------
-## Breaking changes
-- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster
-
- The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments.
- The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct,
- or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable.
-
-## Bug fixes
-- Do not add invalid context to references (#521) -- Yuri Shkuro
-
-
-2.24.0 (2020-06-14)
--------------------
-- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher
-- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima
-- Override reporter config only when agent host/port is set in env (#513) -- ilylia
-- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song
-
-
-2.23.1 (2020-04-28)
--------------------
-- Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj
-
-
-2.23.0 (2020-04-22)
--------------------
-
-- Add the ability to log all span interactions at a new debug log level([#502](https://github.com/jaegertracing/jaeger-client-go/pull/502), [#503](https://github.com/jaegertracing/jaeger-client-go/pull/503), [#504](https://github.com/jaegertracing/jaeger-client-go/pull/504)) -- Prithvi Raj
-- Chore (docs): fix typos ([#496](https://github.com/jaegertracing/jaeger-client-go/pull/496), [#498](https://github.com/jaegertracing/jaeger-client-go/pull/498)) -- Febrian Setianto and Ivan Babrou
-- Unset highest bit of traceID in probabilistic sampler ([#490](https://github.com/jaegertracing/jaeger-client-go/pull/490)) -- Sokolov Yura
-
-2.22.1 (2020-01-16)
--------------------
-
-- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro
-
-
-2.22.0 (2020-01-15)
--------------------
-
-- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro
-- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura
-
-
-2.21.1 (2019-12-20)
--------------------
-
-- Update version correctly.
-
-
-2.21.0 (2019-12-20)
--------------------
-
-- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro
-- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro
-- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud
-- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh
-
-
-2.20.1 (2019-11-08)
--------------------
-
-Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468
-
-- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467
-- Create `OperationNameLateBinding` sampler option and config option
-- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc
-
-
-2.20.0 (2019-11-06)
--------------------
-
-## New Features
-
-- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
-
- Sampling state is shared between all spans of the trace that are still in memory.
- This allows implementation of delayed sampling decisions (see below).
-
-- Support delayed sampling decisions (#449) -- Yuri Shkuro
-
- This is a large structural change to how the samplers work.
- It allows some samplers to be executed multiple times on different
- span events (like setting a tag) and make a positive sampling decision
- later in the span life cycle, or even based on children spans.
- See [README](./README.md#delayed-sampling) for more details.
-
- There is a related minor change in behavior of the adaptive (per-operation) sampler,
- which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the
- operation used to make the sampling decision is always the one provided at span creation.
-
-- Add experimental tag matching sampler (#452) -- Yuri Shkuro
-
- A sampler that can sample a trace based on a certain tag added to the root
- span or one of its local (in-process) children. The sampler can be used with
- another experimental `PrioritySampler` that allows multiple samplers to try
- to make a sampling decision, in a certain priority order.
-
-- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
-- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
-
-## Minor patches
-
-- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
-- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
-- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
-- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
-
-2.19.0 (2019-09-23)
--------------------
-
-- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro
-
-
-2.18.1 (2019-09-16)
--------------------
-
-- Remove go.mod / go.sum that interfere with `go get` (#432)
-
-
-2.18.0 (2019-09-09)
--------------------
-
-- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423)
-
-
-2.17.0 (2019-08-30)
--------------------
-
-- Add a flag for firehose mode (#419)
-- Default sampling server URL to agent (#414)
-- Update default sampling rate when sampling strategy is refreshed (#413)
-- Support "Self" Span Reference (#411)
-- Don't complain about blank service name if tracing is Disabled (#410) Yuri
-- Use IP address from tag if exist (#402)
-- Expose span data to custom reporters [fixes #394] (#399)
-- Fix the span allocation in the pool (#381)
-
-
-2.16.0 (2019-03-24)
--------------------
-
-- Add baggage to B3 codec (#319)
-- Add support for 128bit trace ids to zipkin thrift spans. (#378)
-- Update zipkin propagation logic to support 128bit traceIDs (#373)
-- Accept "true" for the x-b3-sampled header (#356)
-
-- Allow setting of PoolSpans from Config object (#322)
-- Make propagators public to allow wrapping (#379)
-- Change default metric namespace to use relevant separator for the metric backend (#364)
-- Change metrics prefix to jaeger_tracer and add descriptions (#346)
-- Bump OpenTracing to ^1.1.x (#383)
-- Upgrade jaeger-lib to v2.0.0 (#359)
-- Avoid defer when generating random number (#358)
-- Use a pool of rand.Source to reduce lock contention when creating span ids (#357)
-- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342)
-
-
-2.15.0 (2018-10-10)
--------------------
-
-- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313)
-- Make maximum annotation length configurable in tracer options (#318)
-- Support more environment variables in configuration (#323)
-- Print error on Sampler Query failure (#328)
-- Add an HTTPOption to support custom http.RoundTripper (#333)
-- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331)
-
-
-2.14.0 (2018-04-30)
--------------------
-
-- Support throttling for debug traces (#274)
-- Remove dependency on Apache Thrift (#303)
-- Remove dependency on tchannel (#295) (#294)
-- Test with Go 1.9 (#298)
-
-
-2.13.0 (2018-04-15)
--------------------
-
-- Use value receiver for config.NewTracer() (#283)
-- Lock span during jaeger thrift conversion (#273)
-- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260)
-- Added support for client configuration via env vars (#275)
-- Allow overriding sampler in the Config (#270)
-
-
-2.12.0 (2018-03-14)
--------------------
-
-- Use lock when retrieving span.Context() (#268)
-- Add Configuration support for custom Injector and Extractor (#263)
-
-
-2.11.2 (2018-01-12)
--------------------
-
-- Add Gopkg.toml to allow using the lib with `dep`
-
-
-2.11.1 (2018-01-03)
--------------------
-
-- Do not enqueue spans after Reporter is closed (#235, #245)
-- Change default flush interval to 1sec (#243)
-
-
-2.11.0 (2017-11-27)
--------------------
-
-- Normalize metric names and tags to be compatible with Prometheus (#222)
-
-
-2.10.0 (2017-11-14)
--------------------
-
-- Support custom tracing headers (#176)
-- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182)
-- Do not coerce baggage keys to lower case (#196)
-- Log span name when span cannot be reported (#198)
-- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219)
-
-
-2.9.0 (2017-07-29)
-------------------
-
-- Pin thrift <= 0.10 (#179)
-- Introduce a parallel interface ContribObserver (#159)
-
-
-2.8.0 (2017-07-05)
-------------------
-
-- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag
-- Add options to set tracer tags
-
-
-2.7.0 (2017-06-21)
-------------------
-
-- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140)
-- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147)
-- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153)
-- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158)
-- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161)
-
-
-2.6.0 (2017-03-28)
-------------------
-
-- Add config option to initialize RPC Metrics feature
-
-
-2.5.0 (2017-03-23)
-------------------
-
-- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123)
-- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124)
-- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125)
-
-
-2.4.0 (2017-03-21)
-------------------
-
-- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121)
-- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121)
-
-
-2.3.0 (2017-03-20)
-------------------
-
-- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117)
-- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118)
-
-
-2.2.1 (2017-03-14)
-------------------
-
-- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111)
-
-
-2.2.0 (2017-03-10)
-------------------
-
-- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94)
-- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103)
-
-
-2.1.2 (2017-02-27)
--------------------
-
-- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99)
-- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100)
-- Add tracer initialization godoc examples
-
-
-2.1.1 (2017-02-21)
--------------------
-
-- Fix inefficient usage of zap.Logger
-
-
-2.1.0 (2017-02-17)
--------------------
-
-- Add adapter for zap.Logger (https://github.com/uber-go/zap)
-- Move logging API to ./log/ package
-
-
-2.0.0 (2017-02-08)
--------------------
-
-- Support Adaptive Sampling
-- Support 128bit Trace IDs
-- Change trace/span IDs from uint64 to strong types TraceID and SpanID
-- Add Zipkin HTTP B3 Propagation format support #72
-- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics
-- Change API for tracer, reporter, sampler initialization
-
-
-1.6.0 (2016-10-14)
--------------------
-
-- Add Zipkin HTTP transport
-- Support external baggage via jaeger-baggage header
-- Unpin Thrift version, keep to master
-
-
-1.5.1 (2016-09-27)
--------------------
-
-- Relax dependency on opentracing to ^1
-
-
-1.5.0 (2016-09-27)
--------------------
-
-- Upgrade to opentracing-go 1.0
-- Support KV logging for Spans
-
-
-1.4.0 (2016-09-14)
--------------------
-
-- Support debug traces via HTTP header "jaeger-debug-id"
diff --git a/vendor/github.com/uber/jaeger-client-go/CODEOWNERS b/vendor/github.com/uber/jaeger-client-go/CODEOWNERS
deleted file mode 100644
index 0572efcd42..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/CODEOWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-
-* @jaegertracing/jaeger-maintainers
diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
deleted file mode 100644
index 41e2154cf6..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
+++ /dev/null
@@ -1,170 +0,0 @@
-# How to Contribute to Jaeger
-
-We'd love your help!
-
-Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
-pull requests. This document outlines some of the conventions on development
-workflow, commit message formatting, contact points and other resources to make
-it easier to get your contribution accepted.
-
-We gratefully welcome improvements to documentation as well as to code.
-
-# Certificate of Origin
-
-By contributing to this project you agree to the [Developer Certificate of
-Origin](https://developercertificate.org/) (DCO). This document was created
-by the Linux Kernel community and is a simple statement that you, as a
-contributor, have the legal right to make the contribution. See the [DCO](DCO)
-file for details.
-
-## Getting Started
-
-This library uses [dep](https://golang.github.io/dep/) to manage dependencies.
-
-To get started, make sure you clone the Git repository into the correct location
-`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
-
-```
-mkdir -p $GOPATH/src/github.com/uber
-cd $GOPATH/src/github.com/uber
-git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
-cd jaeger-client-go
-git submodule update --init --recursive
-```
-
-Then install dependencies and run the tests:
-
-```
-make install
-make test
-```
-
-## Imports grouping
-
-This project follows this pattern for grouping imports in Go files:
- * imports from standard library
- * imports from other projects
- * imports from `jaeger-client-go` project
-
-For example:
-
-```go
-import (
- "fmt"
-
- "github.com/uber/jaeger-lib/metrics"
- "go.uber.org/zap"
-
- "github.com/uber/jaeger-client-go/config"
-)
-```
-
-## Making A Change
-
-*Before making any significant changes, please [open an
-issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed
-changes ahead of time will make the contribution process smooth for everyone.
-
-Once we've discussed your changes and you've got your code ready, make sure
-that tests are passing (`make test` or `make cover`) and open your PR. Your
-pull request is most likely to be accepted if it:
-
-* Includes tests for new functionality.
-* Follows the guidelines in [Effective
- Go](https://golang.org/doc/effective_go.html) and the [Go team's common code
- review comments](https://github.com/golang/go/wiki/CodeReviewComments).
-* Has a [good commit message](https://chris.beams.io/posts/git-commit/):
- * Separate subject from body with a blank line
- * Limit the subject line to 50 characters
- * Capitalize the subject line
- * Do not end the subject line with a period
- * Use the imperative mood in the subject line
- * Wrap the body at 72 characters
- * Use the body to explain _what_ and _why_ instead of _how_
-* Each commit must be signed by the author ([see below](#sign-your-work)).
-
-## License
-
-By contributing your code, you agree to license your contribution under the terms
-of the [Apache License](LICENSE).
-
-If you are adding a new file it should have a header like below. The easiest
-way to add such header is to run `make fmt`.
-
-```
-// Copyright (c) 2017 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-```
-
-## Sign your work
-
-The sign-off is a simple line at the end of the explanation for the
-patch, which certifies that you wrote it or otherwise have the right to
-pass it on as an open-source patch. The rules are pretty simple: if you
-can certify the below (from
-[developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith
-
-using your real name (sorry, no pseudonyms or anonymous contributions).
-
-You can add the sign off when creating the git commit via `git commit -s`.
-
-If you want this to be automatic you can set up some aliases:
-
-```
-git config --add alias.amend "commit -s --amend"
-git config --add alias.c "commit -s"
-```
diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO
deleted file mode 100644
index 068953d4bd..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/DCO
+++ /dev/null
@@ -1,37 +0,0 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
deleted file mode 100644
index 268289bb41..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
+++ /dev/null
@@ -1,301 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
- name = "github.com/HdrHistogram/hdrhistogram-go"
- packages = ["."]
- pruneopts = "UT"
- revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
- version = "0.9.0"
-
-[[projects]]
- digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
- name = "github.com/beorn7/perks"
- packages = ["quantile"]
- pruneopts = "UT"
- revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
- version = "v1.0.1"
-
-[[projects]]
- branch = "master"
- digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277"
- name = "github.com/crossdock/crossdock-go"
- packages = [
- ".",
- "assert",
- "require",
- ]
- pruneopts = "UT"
- revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361"
-
-[[projects]]
- digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- pruneopts = "UT"
- revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:7ae311278f7ccaa724de8f2cdec0a507ba3ee6dea8c77237e8157bcf64b0f28b"
- name = "github.com/golang/mock"
- packages = ["gomock"]
- pruneopts = "UT"
- revision = "f7b1909c82a8958747e5c87c6a5c3b2eaed8a33d"
- version = "v1.4.4"
-
-[[projects]]
- digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985"
- name = "github.com/golang/protobuf"
- packages = [
- "proto",
- "ptypes",
- "ptypes/any",
- "ptypes/duration",
- "ptypes/timestamp",
- ]
- pruneopts = "UT"
- revision = "d04d7b157bb510b1e0c10132224b616ac0e26b17"
- version = "v1.4.2"
-
-[[projects]]
- digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
- name = "github.com/matttproud/golang_protobuf_extensions"
- packages = ["pbutil"]
- pruneopts = "UT"
- revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:fe5217d44ae8fb84f711968816fe50077cea9dfa8f44425b8e44e7e3de896d01"
- name = "github.com/opentracing/opentracing-go"
- packages = [
- ".",
- "ext",
- "harness",
- "log",
- ]
- pruneopts = "UT"
- revision = "d34af3eaa63c4d08ab54863a4bdd0daa45212e12"
- version = "v1.2.0"
-
-[[projects]]
- digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314"
- name = "github.com/pkg/errors"
- packages = ["."]
- pruneopts = "UT"
- revision = "614d223910a179a466c1767a985424175c39b465"
- version = "v0.9.1"
-
-[[projects]]
- digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
- name = "github.com/pmezard/go-difflib"
- packages = ["difflib"]
- pruneopts = "UT"
- revision = "792786c7400a136282c1664665ae0a8db921c6c2"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad"
- name = "github.com/prometheus/client_golang"
- packages = [
- "prometheus",
- "prometheus/internal",
- ]
- pruneopts = "UT"
- revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:0db23933b8052702d980a3f029149b3f175f7c0eea0cff85b175017d0f2722c0"
- name = "github.com/prometheus/client_model"
- packages = ["go"]
- pruneopts = "UT"
- revision = "7bc5445566f0fe75b15de23e6b93886e982d7bf9"
- version = "v0.2.0"
-
-[[projects]]
- digest = "1:4407525bde4e6ad9c1f60113d38cbb255d769e0ea506c8cf877db39da7753b3a"
- name = "github.com/prometheus/common"
- packages = [
- "expfmt",
- "internal/bitbucket.org/ww/goautoneg",
- "model",
- ]
- pruneopts = "UT"
- revision = "317b7b125e8fddda956d0c9574e5f03f438ed5bc"
- version = "v0.14.0"
-
-[[projects]]
- digest = "1:b2268435af85ee1a0fca0e37de4225f78e2d9d8b0b66acde3a29f127634efa87"
- name = "github.com/prometheus/procfs"
- packages = [
- ".",
- "internal/fs",
- "internal/util",
- ]
- pruneopts = "UT"
- revision = "9dece15c53cd5e9fbfbd72d5108adcf526a3f486"
- version = "v0.2.0"
-
-[[projects]]
- digest = "1:86ff4af7b6bb3d27c2e89b5ef8c139678acff1cad74a3c5235fc5af6b94fcc9e"
- name = "github.com/stretchr/objx"
- packages = ["."]
- pruneopts = "UT"
- revision = "35313a95ee26395aa17d366c71a2ccf788fa69b6"
- version = "v0.3.0"
-
-[[projects]]
- digest = "1:5201127841a78d84d0ca68a2e564c08e3882c0fb9321a75997ce87926e0d63ea"
- name = "github.com/stretchr/testify"
- packages = [
- "assert",
- "mock",
- "require",
- "suite",
- ]
- pruneopts = "UT"
- revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8"
- version = "v1.6.1"
-
-[[projects]]
- digest = "1:4af46f2faea30e52c96ec9ec32bb654d2729579a80d242b0acfa193ad321eb61"
- name = "github.com/uber/jaeger-lib"
- packages = [
- "metrics",
- "metrics/metricstest",
- "metrics/prometheus",
- ]
- pruneopts = "UT"
- revision = "48cc1df63e6be0d63b95677f0d22beb880bce1e4"
- version = "v2.3.0"
-
-[[projects]]
- digest = "1:7a3de4371d6b68c6f37a0df2c09905664d9de59026c91cbe275aae55f4fe760f"
- name = "go.uber.org/atomic"
- packages = ["."]
- pruneopts = "UT"
- revision = "12f27ba2637fa0e13772a4f05fa46a5d18d53182"
- version = "v1.7.0"
-
-[[projects]]
- digest = "1:e9eeeabfd025a5e69b9c8e2857d3517ea67e747ae913bcb0a9e1e7bafdb9c298"
- name = "go.uber.org/multierr"
- packages = ["."]
- pruneopts = "UT"
- revision = "3114a8b704d2d28dbacda34a872690aaef66aeed"
- version = "v1.6.0"
-
-[[projects]]
- digest = "1:0398f5f0e2e9233f25fad702f3b323241daf9f876cc869ab259238cf1bced236"
- name = "go.uber.org/zap"
- packages = [
- ".",
- "buffer",
- "internal/bufferpool",
- "internal/color",
- "internal/exit",
- "zapcore",
- "zaptest/observer",
- ]
- pruneopts = "UT"
- revision = "404189cf44aea95b0cd9bddcb0242dd4cf88c510"
- version = "v1.16.0"
-
-[[projects]]
- branch = "master"
- digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd"
- name = "golang.org/x/net"
- packages = [
- "context",
- "context/ctxhttp",
- ]
- pruneopts = "UT"
- revision = "328152dc79b1547da63f950cd4cdd9afd50b2774"
-
-[[projects]]
- branch = "master"
- digest = "1:1e581fa394685ef0d84008ae04cf3414390c1a700c04846853869cb4ac2fec86"
- name = "golang.org/x/sys"
- packages = [
- "internal/unsafeheader",
- "unix",
- "windows",
- ]
- pruneopts = "UT"
- revision = "d9f96fdee20d1e5115ee34ba4016eae6cfb66eb9"
-
-[[projects]]
- digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1"
- name = "google.golang.org/protobuf"
- packages = [
- "encoding/prototext",
- "encoding/protowire",
- "internal/descfmt",
- "internal/descopts",
- "internal/detrand",
- "internal/encoding/defval",
- "internal/encoding/messageset",
- "internal/encoding/tag",
- "internal/encoding/text",
- "internal/errors",
- "internal/fieldsort",
- "internal/filedesc",
- "internal/filetype",
- "internal/flags",
- "internal/genid",
- "internal/impl",
- "internal/mapsort",
- "internal/pragma",
- "internal/set",
- "internal/strs",
- "internal/version",
- "proto",
- "reflect/protoreflect",
- "reflect/protoregistry",
- "runtime/protoiface",
- "runtime/protoimpl",
- "types/known/anypb",
- "types/known/durationpb",
- "types/known/timestamppb",
- ]
- pruneopts = "UT"
- revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1"
- version = "v1.25.0"
-
-[[projects]]
- branch = "v3"
- digest = "1:229cb0f6192914f518cc1241ede6d6f1f458b31debfa18bf3a5c9e4f7b01e24b"
- name = "gopkg.in/yaml.v3"
- packages = ["."]
- pruneopts = "UT"
- revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- input-imports = [
- "github.com/crossdock/crossdock-go",
- "github.com/golang/mock/gomock",
- "github.com/opentracing/opentracing-go",
- "github.com/opentracing/opentracing-go/ext",
- "github.com/opentracing/opentracing-go/harness",
- "github.com/opentracing/opentracing-go/log",
- "github.com/pkg/errors",
- "github.com/prometheus/client_golang/prometheus",
- "github.com/stretchr/testify/assert",
- "github.com/stretchr/testify/mock",
- "github.com/stretchr/testify/require",
- "github.com/stretchr/testify/suite",
- "github.com/uber/jaeger-lib/metrics",
- "github.com/uber/jaeger-lib/metrics/metricstest",
- "github.com/uber/jaeger-lib/metrics/prometheus",
- "go.uber.org/atomic",
- "go.uber.org/zap",
- "go.uber.org/zap/zapcore",
- "go.uber.org/zap/zaptest/observer",
- ]
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
deleted file mode 100644
index 3aa307a904..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
+++ /dev/null
@@ -1,31 +0,0 @@
-[[constraint]]
- name = "github.com/crossdock/crossdock-go"
- branch = "master"
-
-[[constraint]]
- name = "github.com/opentracing/opentracing-go"
- version = "^1.2"
-
-[[constraint]]
- name = "github.com/prometheus/client_golang"
- version = "^1"
-
-[[constraint]]
- name = "github.com/stretchr/testify"
- version = "^1.1.3"
-
-[[constraint]]
- name = "go.uber.org/atomic"
- version = "^1"
-
-[[constraint]]
- name = "github.com/uber/jaeger-lib"
- version = "^2.3"
-
-[[constraint]]
- name = "go.uber.org/zap"
- version = "^1"
-
-[prune]
- go-tests = true
- unused-packages = true
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
deleted file mode 100644
index ee7b21268a..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ /dev/null
@@ -1,135 +0,0 @@
-PROJECT_ROOT=github.com/uber/jaeger-client-go
-export GO111MODULE=off
-PACKAGES := . $(shell GO111MODULE=off go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
-# all .go files that don't exist in hidden directories
-ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
- -e ".*/\..*" \
- -e ".*/_.*" \
- -e ".*/mocks.*")
-
-USE_DEP := true
-
--include crossdock/rules.mk
-
-RACE=-race
-GOTEST=go test -v $(RACE)
-GOLINT=golint
-GOVET=go vet
-GOFMT=gofmt
-FMT_LOG=fmt.log
-LINT_LOG=lint.log
-
-THRIFT_VER=0.14
-THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER)
-THRIFT=docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) thrift
-
-PASS=$(shell printf "\033[32mPASS\033[0m")
-FAIL=$(shell printf "\033[31mFAIL\033[0m")
-COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/''
-
-.DEFAULT_GOAL := test-and-lint
-
-.PHONY: test-and-lint
-test-and-lint: test fmt lint
-
-.PHONY: test
-test:
-ifeq ($(USE_DEP),true)
- dep check
-endif
- bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)"
-
-.PHONY: fmt
-fmt:
- $(GOFMT) -e -s -l -w $(ALL_SRC)
- ./scripts/updateLicenses.sh
-
-.PHONY: lint
-lint: vet golint lint-fmt lint-thrift-testing
-
-.PHONY: vet
-vet:
- $(GOVET) $(PACKAGES)
-
-.PHONY: golint
-golint:
- @cat /dev/null > $(LINT_LOG)
- @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
- @[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)
-
-.PHONY: lint-fmt
-lint-fmt:
- @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
- ./scripts/updateLicenses.sh >> $(FMT_LOG)
- @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)
-
-# make sure thrift/ module does not import "testing"
-.PHONY: lint-thrift-testing
-lint-thrift-testing:
- @cat /dev/null > $(LINT_LOG)
- @(grep -rn '"testing"' thrift | grep -v README.md > $(LINT_LOG)) || true
- @[ ! -s "$(LINT_LOG)" ] || (echo '"thrift" module must not import "testing", see issue #585' | cat - $(LINT_LOG) && false)
-
-.PHONY: install
-install:
- @echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE)
-ifeq ($(USE_DEP),true)
- dep version || make install-dep
- dep ensure -vendor-only -v
-endif
-ifeq ($(USE_GLIDE),true)
- glide --version || go get github.com/Masterminds/glide
- glide install
-endif
-
-
-.PHONY: cover
-cover:
- $(GOTEST) -cover -coverprofile cover.out $(PACKAGES)
-
-.PHONY: cover-html
-cover-html: cover
- go tool cover -html=cover.out -o cover.html
-
-# This is not part of the regular test target because we don't want to slow it
-# down.
-.PHONY: test-examples
-test-examples:
- make -C examples
-
-.PHONY: thrift
-thrift: idl-submodule thrift-compile
-
-# TODO at the moment we're not generating tchan_*.go files
-.PHONY: thrift-compile
-thrift-compile: thrift-image
- docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) /data/scripts/gen-thrift.sh
-
-.PHONY: idl-submodule
-idl-submodule:
- git submodule init
- git submodule update
-
-.PHONY: thrift-image
-thrift-image:
- $(THRIFT) -version
-
-.PHONY: install-dep
-install-dep:
- - curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep
- - chmod +x $$GOPATH/bin/dep
-
-.PHONY: install-ci
-install-ci: install
- go get github.com/wadey/gocovmerge
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/lint/golint
-
-.PHONY: test-ci
-test-ci: cover
-ifeq ($(CI_SKIP_LINT),true)
- echo 'skipping lint'
-else
- make lint
-endif
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
deleted file mode 100644
index e23912b35a..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ /dev/null
@@ -1,339 +0,0 @@
-[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
-
-# 🛑 This library is being deprecated!
-
-We urge all users to migrate to [OpenTelemetry](https://opentelemetry.io/). Please refer to the [notice in the documentation](https://www.jaegertracing.io/docs/latest/client-libraries/#deprecating-jaeger-clients) for details.
-
-# Jaeger Bindings for Go OpenTracing API
-
-Instrumentation library that implements an
-[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io).
-
-**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release.
- * :white_check_mark: `import "github.com/uber/jaeger-client-go"`
- * :x: `import "github.com/jaegertracing/jaeger-client-go"`
-
-## How to Contribute
-
-Please see [CONTRIBUTING.md](CONTRIBUTING.md).
-
-## Installation
-
-### Preferred
-
-Add `github.com/uber/jaeger-client-go` to `go.mod`.
-
-### Old way
-
-We recommend using a dependency manager like [dep](https://golang.github.io/dep/)
-and [semantic versioning](http://semver.org/) when including this library into an application.
-For example, Jaeger backend imports this library like this:
-
-```toml
-[[constraint]]
- name = "github.com/uber/jaeger-client-go"
- version = "2.17"
-```
-
-If you instead want to use the latest version in `master`, you can pull it via `go get`.
-Note that during `go get` you may see build errors due to incompatible dependencies, which is why
-we recommend using semantic versions for dependencies. The error may be fixed by running
-`make install` (it will install `dep` if you don't have it):
-
-```shell
-go get -u github.com/uber/jaeger-client-go/
-cd $GOPATH/src/github.com/uber/jaeger-client-go/
-git submodule update --init --recursive
-make install
-```
-
-## Initialization
-
-See tracer initialization examples in [godoc](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#pkg-examples)
-and [config/example_test.go](./config/example_test.go).
-
-There are two ways to create a tracer (a minimal sketch follows the list):
- * Using the [Configuration](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#Configuration) struct, which allows declarative configuration. For example, you can populate that struct from a YAML/JSON config, or ask it to initialize itself using environment variables (see next section).
- * Using the [NewTracer()](https://pkg.go.dev/github.com/uber/jaeger-client-go#NewTracer) function, which allows full programmatic control of configuring the tracer using TracerOptions.
-
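-As a minimal sketch of the declarative style (the service name and
-sampler settings below are placeholder values):
-
-```go
-import (
-    "github.com/uber/jaeger-client-go"
-    "github.com/uber/jaeger-client-go/config"
-)
-
-cfg := config.Configuration{
-    ServiceName: "your-service-name",
-    Sampler: &config.SamplerConfig{
-        Type:  jaeger.SamplerTypeConst, // sample every trace
-        Param: 1,
-    },
-    Reporter: &config.ReporterConfig{
-        LogSpans: true, // log finished spans if a logger is configured
-    },
-}
-tracer, closer, err := cfg.NewTracer()
-```
-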
-### Environment variables
-
-The tracer can be initialized with values coming from environment variables, if it is
-[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
-that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
-None of the env vars are required and all of them can be overridden via direct setting
-of the property on the configuration object.
-
-Property| Description
---- | ---
-JAEGER_SERVICE_NAME | The service name.
-JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`).
-JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`).
-JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored.
-JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint.
-JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint.
-JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans, `true` or `false` (default `false`).
-JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`).
-JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`).
-JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables the UDP connection helper that periodically re-resolves the agent's hostname and reconnects if it changed (default `false`).
-JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`).
-JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/.
-JAEGER_SAMPLER_PARAM | The sampler parameter (number).
-JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler.
-JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`).
-JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
-JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
-JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
-JAEGER_TRACEID_128BIT | Whether to enable 128-bit trace ID generation, `true` or `false`. If not enabled, the SDK defaults to 64-bit trace IDs.
-JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
-JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
-
-By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
-`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
-to the endpoint via HTTP, in which case `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` are ignored. If `JAEGER_ENDPOINT` is
-secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment
-variables.
-
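-As a sketch, env-based initialization might look like this (the
-exported values are examples only):
-
-```sh
-export JAEGER_SERVICE_NAME=your-service-name
-export JAEGER_SAMPLER_TYPE=const
-export JAEGER_SAMPLER_PARAM=1
-```
-
-```go
-cfg, err := config.FromEnv()
-if err != nil {
-    // a malformed env var (e.g. an unparsable duration) surfaces here
-    log.Fatalf("cannot parse Jaeger env vars: %v", err)
-}
-tracer, closer, err := cfg.NewTracer()
-```
-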
-### Closing the tracer via `io.Closer`
-
-The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
-It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
-before exiting, e.g.
-
-```go
-tracer, closer, err := cfg.NewTracer(...)
-defer closer.Close()
-```
-
-This is especially useful for command-line tools that enable tracing, as well as
-for long-running apps that support graceful shutdown. For example, if your deployment
-system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
-exit, then having `defer closer.Close()` ensures that all buffered spans are flushed.
-
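-A fuller sketch of this pattern in a `main()` function (construction
-errors handled, everything else elided):
-
-```go
-func main() {
-    cfg, err := config.FromEnv()
-    if err != nil {
-        log.Fatalf("cannot parse Jaeger env vars: %v", err)
-    }
-    tracer, closer, err := cfg.NewTracer()
-    if err != nil {
-        log.Fatalf("cannot initialize Jaeger tracer: %v", err)
-    }
-    defer closer.Close() // flushes buffered spans before the process exits
-    opentracing.SetGlobalTracer(tracer)
-
-    // ... run the application ...
-}
-```
-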
-### Metrics & Monitoring
-
-The tracer emits a number of different metrics, defined in
-[metrics.go](metrics.go). The monitoring backend is expected to support
-tag-based metric names, e.g. instead of `statsd`-style string names
-like `counters.my-service.jaeger.spans.started.sampled`, the metrics
-are defined by a short name and a collection of key/value tags, for
-example: `name:jaeger.traces, state:started, sampled:y`. See the [metrics.go](./metrics.go)
-file for the full list and descriptions of emitted metrics.
-
-The monitoring backend is represented by the `metrics.Factory` interface from package
-[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation
-of that interface can be passed as an option to either the Configuration object or the Tracer
-constructor, for example:
-
-```go
-import (
- "github.com/uber/jaeger-client-go/config"
- "github.com/uber/jaeger-lib/metrics/prometheus"
-)
-
- metricsFactory := prometheus.New()
- tracer, closer, err := config.Configuration{
- ServiceName: "your-service-name",
- }.NewTracer(
- config.Metrics(metricsFactory),
- )
-```
-
-By default, a no-op `metrics.NullFactory` is used.
-
-### Logging
-
-The tracer can be configured with an optional logger, which will be
-used to log communication errors, or log spans if a logging reporter
-option is specified in the configuration. The logging API is abstracted
-by the [Logger](logger.go) interface. A logger instance implementing
-this interface can be set on the `Config` object before calling the
-`New` method.
-
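-For example, a sketch that routes tracer diagnostics to the standard
-`log` package (assuming the `config.Logger` option and the bundled
-`jaeger.StdLogger`):
-
-```go
-import (
-    "github.com/uber/jaeger-client-go"
-    "github.com/uber/jaeger-client-go/config"
-)
-
-tracer, closer, err := config.Configuration{
-    ServiceName: "your-service-name",
-}.NewTracer(
-    config.Logger(jaeger.StdLogger), // assumed: bundled stdlib-backed logger
-)
-```
-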
-Besides the [zap](https://github.com/uber-go/zap) implementation
-bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
-one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
-
-## Instrumentation for Tracing
-
-Since this tracer is fully compliant with OpenTracing API 1.0,
-all code instrumentation should only use the API itself, as described
-in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
-
-## Features
-
-### Reporters
-
-A "reporter" is a component that receives the finished spans and reports
-them somewhere. Under normal circumstances, the Tracer
-should use the default `RemoteReporter`, which sends the spans out of
-process via a configurable "transport". For testing purposes, one can
-use an `InMemoryReporter` that accumulates spans in a buffer and
-allows retrieving them for later verification. Also available are
-`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`,
-which logs all finished spans using their `String()` method, and a
-`CompositeReporter` that can be used to combine more than one reporter
-into one, e.g. to attach a logging reporter to the main remote reporter.
-
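-As an illustration, a unit test might use the in-memory reporter along
-these lines (a sketch):
-
-```go
-reporter := jaeger.NewInMemoryReporter()
-tracer, closer := jaeger.NewTracer(
-    "test-service",
-    jaeger.NewConstSampler(true), // sample everything so spans get reported
-    reporter,
-)
-defer closer.Close()
-
-span := tracer.StartSpan("some-operation")
-span.Finish()
-
-spans := reporter.GetSpans() // finished spans, ready for assertions
-```
-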
-### Span Reporting Transports
-
-The remote reporter uses "transports" to actually send the spans out
-of process. Currently the supported transports include:
- * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
- * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
-
-### Sampling
-
-The tracer does not record all spans, but only those that have the
-sampling bit set in the `flags`. When a new trace is started and a new
-unique ID is generated, a sampling decision is made whether this trace
-should be sampled. The sampling decision is propagated to all downstream
-calls via the `flags` field of the trace context. The following samplers
-are available (see the sketch after this list):
- 1. `RemotelyControlledSampler` uses one of the other simpler samplers
- and periodically updates it by polling an external server. This
- allows dynamic control of the sampling strategies.
- 1. `ConstSampler` always makes the same sampling decision for all
- trace IDs. It can be configured to either sample all traces, or
- to sample none.
- 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
- for a given trace to be sampled. The actual decision is made by
- comparing the trace ID with a random number multiplied by the
- sampling rate.
- 1. `RateLimitingSampler` can be used to allow only a certain fixed
- number of traces to be sampled per second.
-
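-For example, the non-remote samplers can be constructed directly (a
-sketch; error handling elided):
-
-```go
-constSampler := jaeger.NewConstSampler(true)            // sample everything
-probSampler, _ := jaeger.NewProbabilisticSampler(0.001) // ~0.1% of traces
-rateSampler := jaeger.NewRateLimitingSampler(100)       // at most 100 traces/sec
-```
-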
-#### Delayed sampling
-
-Version 2.20 introduced the ability to delay sampling decisions in the life cycle
-of the root span. It involves several features and architectural changes:
- * **Shared sampling state**: the sampling state is shared across all local
- (i.e. in-process) spans for a given trace.
- * **New `SamplerV2` API** allows the sampler to be called at multiple points
- in the life cycle of a span:
- * on span creation
- * on overwriting span operation name
- * on setting span tags
- * on finishing the span
- * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
- to indicate if the negative sampling decision is final or not (positive sampling
- decisions are always final). If the decision is not final, the sampler will be
- called again on further span life cycle events, like setting tags.
-
-These new features are used in the experimental `x.TagMatchingSampler`, which
-can sample a trace based on a certain tag added to the root
-span or one of its local (in-process) children. The sampler can be used with
-another experimental `x.PrioritySampler` that allows multiple samplers to try
-to make a sampling decision, in a certain priority order.
-
-### Baggage Injection
-
-The OpenTracing spec allows for [baggage][], which are key-value pairs that are added
-to the span context and propagated throughout the trace. An external process can inject baggage
-by setting the special HTTP Header `jaeger-baggage` on a request:
-
-```sh
-curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
-```
-
-Baggage can also be set programmatically inside your service:
-
-```go
-if span := opentracing.SpanFromContext(ctx); span != nil {
- span.SetBaggageItem("key", "value")
-}
-```
-
-Another service downstream of that can retrieve the baggage in a similar way:
-
-```go
-if span := opentracing.SpanFromContext(ctx); span != nil {
- val := span.BaggageItem("key")
- println(val)
-}
-```
-
-### Debug Traces (Forced Sampling)
-
-#### Programmatically
-
-The OpenTracing API defines a `sampling.priority` standard tag that
-can be used to affect the sampling of a span and its children:
-
-```go
-import (
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
-)
-
-span := opentracing.SpanFromContext(ctx)
-ext.SamplingPriority.Set(span, 1)
-```
-
-#### Via HTTP Headers
-
-Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`,
-which can be set in the incoming request, e.g.
-
-```sh
-curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
-```
-
-When Jaeger sees this header in the request that otherwise has no
-tracing context, it ensures that the new trace started for this
-request will be sampled in the "debug" mode (meaning it should survive
-all downsampling that might happen in the collection pipeline), and the
-root span will have a tag as if this statement was executed:
-
-```go
-span.SetTag("jaeger-debug-id", "some-correlation-id")
-```
-
-This allows using Jaeger UI to find the trace by this tag.
-
-### Zipkin HTTP B3 compatible header propagation
-
-Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
-by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin).
-
-However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up.
-
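-A sketch of enabling it through injector/extractor options (assuming
-the `zipkin` subpackage's `NewZipkinB3HTTPHeaderPropagator` constructor
-described in that README):
-
-```go
-import (
-    "github.com/opentracing/opentracing-go"
-    "github.com/uber/jaeger-client-go/config"
-    "github.com/uber/jaeger-client-go/zipkin"
-)
-
-propagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
-tracer, closer, err := config.Configuration{
-    ServiceName: "your-service-name",
-}.NewTracer(
-    config.Injector(opentracing.HTTPHeaders, propagator),
-    config.Extractor(opentracing.HTTPHeaders, propagator),
-)
-```
-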
-## SelfRef
-
-Jaeger Tracer supports an additional [span reference][] type called `Self`, which was proposed
-to the OpenTracing Specification (https://github.com/opentracing/specification/issues/81)
-but not yet accepted. This allows the caller to provide an already created `SpanContext`
-when starting a new span. The `Self` reference bypasses trace and span id generation,
-as well as sampling decisions (i.e. the sampling bit in the `SpanContext.flags` must be
-set appropriately by the caller).
-
-The `Self` reference supports the following use cases:
- * the ability to provide externally generated trace and span IDs
- * appending data to the same span from different processes, such as loading and continuing spans/traces from offline (i.e. log-based) storage
-
-Usage requires passing in a `SpanContext` and the `jaeger.Self` reference type:
-```go
-span := tracer.StartSpan(
- "continued_span",
- jaeger.SelfRef(yourSpanContext),
-)
-...
-defer span.Finish()
-```
-
-## License
-
-[Apache 2.0 License](LICENSE).
-
-
-[doc-img]: https://pkg.go.dev/badge/github.com/uber/jaeger-client-go.svg
-[doc]: https://pkg.go.dev/github.com/uber/jaeger-client-go
-[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
-[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
-[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
-[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
-[ot-url]: http://opentracing.io
-[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
-[timeunits]: https://golang.org/pkg/time/#ParseDuration
-[span reference]: https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans
diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
deleted file mode 100644
index 12438d8416..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/RELEASE.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Release Process
-
-1. Create a PR "Preparing for release X.Y.Z" against master branch
- * Alter CHANGELOG.md from ` (unreleased)` to ` (YYYY-MM-DD)`
- * Use `git log --pretty=format:'- %s -- %an'` as the basis for changelog entries
- * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
-2. Create a release "Release X.Y.Z" on Github
- * Create Tag `vX.Y.Z`
- * Copy CHANGELOG.md into the release notes
-3. Create a PR "Back to development" against master branch
- * Add ` (unreleased)` to CHANGELOG.md
- * Update `JaegerClientVersion` in constants.go to `Go-dev`
diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
deleted file mode 100644
index 1037ca0e86..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "github.com/opentracing/opentracing-go/log"
-
- "github.com/uber/jaeger-client-go/internal/baggage"
-)
-
-// baggageSetter is an actor that can set a baggage value on a Span given certain
-// restrictions (e.g. maxValueLength).
-type baggageSetter struct {
- restrictionManager baggage.RestrictionManager
- metrics *Metrics
-}
-
-func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
- return &baggageSetter{
- restrictionManager: restrictionManager,
- metrics: metrics,
- }
-}
-
-// (NB) span should hold the lock before making this call
-func (s *baggageSetter) setBaggage(span *Span, key, value string) {
- var truncated bool
- var prevItem string
- restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
- if !restriction.KeyAllowed() {
- s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
- s.metrics.BaggageUpdateFailure.Inc(1)
- return
- }
- if len(value) > restriction.MaxValueLength() {
- truncated = true
- value = value[:restriction.MaxValueLength()]
- s.metrics.BaggageTruncate.Inc(1)
- }
- prevItem = span.context.baggage[key]
- s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
- span.context = span.context.WithBaggageItem(key, value)
- s.metrics.BaggageUpdateSuccess.Inc(1)
-}
-
-func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
- if !span.context.IsSampled() {
- return
- }
- fields := []log.Field{
- log.String("event", "baggage"),
- log.String("key", key),
- log.String("value", value),
- }
- if prevItem != "" {
- fields = append(fields, log.String("override", "true"))
- }
- if truncated {
- fields = append(fields, log.String("truncated", "true"))
- }
- if !valid {
- fields = append(fields, log.String("invalid", "true"))
- }
- span.logFieldsNoLocking(fields...)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
deleted file mode 100644
index 06676350b7..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ /dev/null
@@ -1,447 +0,0 @@
-// Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
- "errors"
- "fmt"
- "io"
- "strings"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/uber/jaeger-client-go/utils"
-
- "github.com/uber/jaeger-client-go"
- "github.com/uber/jaeger-client-go/internal/baggage/remote"
- throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
- "github.com/uber/jaeger-client-go/rpcmetrics"
- "github.com/uber/jaeger-client-go/transport"
- "github.com/uber/jaeger-lib/metrics"
-)
-
-const defaultSamplingProbability = 0.001
-
-// Configuration configures and creates Jaeger Tracer
-type Configuration struct {
- // ServiceName specifies the service name to use on the tracer.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME
- ServiceName string `yaml:"serviceName"`
-
- // Disabled makes the config return opentracing.NoopTracer.
- // Value can be provided by FromEnv() via the environment variable named JAEGER_DISABLED.
- Disabled bool `yaml:"disabled"`
-
- // RPCMetrics enables generation of RPC metrics (requires metrics factory to be provided).
- // Value can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS
- RPCMetrics bool `yaml:"rpc_metrics"`
-
- // Gen128Bit instructs the tracer to generate 128-bit wide trace IDs, compatible with W3C Trace Context.
- // Value can be provided by FromEnv() via the environment variable named JAEGER_TRACEID_128BIT.
- Gen128Bit bool `yaml:"traceid_128bit"`
-
- // Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS
- Tags []opentracing.Tag `yaml:"tags"`
-
- Sampler *SamplerConfig `yaml:"sampler"`
- Reporter *ReporterConfig `yaml:"reporter"`
- Headers *jaeger.HeadersConfig `yaml:"headers"`
- BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
- Throttler *ThrottlerConfig `yaml:"throttler"`
-}
-
-// SamplerConfig allows initializing a non-default sampler. All fields are optional.
-type SamplerConfig struct {
- // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE
- Type string `yaml:"type"`
-
- // Param is a value passed to the sampler.
- // Valid values for Param field are:
- // - for "const" sampler, 0 or 1 for always false/true respectively
- // - for "probabilistic" sampler, a probability between 0 and 1
- // - for "rateLimiting" sampler, the number of spans per second
- // - for "remote" sampler, param is the same as for "probabilistic"
- // and indicates the initial sampling rate before the actual one
- // is received from the mothership.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM
- Param float64 `yaml:"param"`
-
- // SamplingServerURL is the URL of sampling manager that can provide
- // sampling strategy to this service.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT
- SamplingServerURL string `yaml:"samplingServerURL"`
-
- // SamplingRefreshInterval controls how often the remotely controlled sampler will poll
- // sampling manager for the appropriate sampling strategy.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
- SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
-
- // MaxOperations is the maximum number of operations that the PerOperationSampler
- // will keep track of. If an operation is not tracked, a default probabilistic
- // sampler will be used rather than the per operation specific sampler.
- // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
- MaxOperations int `yaml:"maxOperations"`
-
- // Opt-in feature for applications that require late binding of span name via explicit
- // call to SetOperationName when using PerOperationSampler. When this feature is enabled,
- // the sampler will return retryable=true from OnCreateSpan(), thus leaving the sampling
- // decision as non-final (and the span as writeable). This may lead to degraded performance
- // in applications that always provide the correct span name on trace creation.
- //
- // For backwards compatibility this option is off by default.
- OperationNameLateBinding bool `yaml:"operationNameLateBinding"`
-
- // Options can be used to programmatically pass additional options to the Remote sampler.
- Options []jaeger.SamplerOption
-}
-
-// ReporterConfig configures the reporter. All fields are optional.
-type ReporterConfig struct {
- // QueueSize controls how many spans the reporter can keep in memory before it starts dropping
- // new spans. The queue is continuously drained by a background go-routine, as fast as spans
- // can be sent out of process.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
- QueueSize int `yaml:"queueSize"`
-
- // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
- // It is generally not useful, as it only matters for very low traffic services.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
- BufferFlushInterval time.Duration
-
- // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
- // and logs all submitted spans. Main Configuration.Logger must be initialized in the code
- // for this option to have any effect.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS
- LogSpans bool `yaml:"logSpans"`
-
- // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address.
- // Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
- LocalAgentHostPort string `yaml:"localAgentHostPort"`
-
- // DisableAttemptReconnecting when true, disables udp connection helper that periodically re-resolves
- // the agent's hostname and reconnects if there was a change. This option only
- // applies if LocalAgentHostPort is specified.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED
- DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"`
-
- // AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname
- // in order to detect address changes. This option only applies if DisableAttemptReconnecting is false.
- // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL
- AttemptReconnectInterval time.Duration
-
- // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL.
- // Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT
- CollectorEndpoint string `yaml:"collectorEndpoint"`
-
- // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
- // Can be provided by FromEnv() via the environment variable named JAEGER_USER
- User string `yaml:"user"`
-
- // Password instructs reporter to include a password for basic http authentication when sending spans to
- // jaeger-collector.
- // Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD
- Password string `yaml:"password"`
-
- // HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans.
- // This field takes effect only when using HTTPTransport by setting the CollectorEndpoint.
- HTTPHeaders map[string]string `yaml:"http_headers"`
-}
-
-// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
-// certain baggage keys. All fields are optional.
-type BaggageRestrictionsConfig struct {
- // DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
- // manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
-// been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
- // restrictions have been retrieved from jaeger-agent.
- DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
-
- // HostPort is the hostPort of jaeger-agent's baggage restrictions server
- HostPort string `yaml:"hostPort"`
-
- // RefreshInterval controls how often the baggage restriction manager will poll
- // jaeger-agent for the most recent baggage restrictions.
- RefreshInterval time.Duration `yaml:"refreshInterval"`
-}
-
-// ThrottlerConfig configures the throttler which can be used to throttle the
-// rate at which the client may send debug requests.
-type ThrottlerConfig struct {
- // HostPort of jaeger-agent's credit server.
- HostPort string `yaml:"hostPort"`
-
- // RefreshInterval controls how often the throttler will poll jaeger-agent
- // for more throttling credits.
- RefreshInterval time.Duration `yaml:"refreshInterval"`
-
- // SynchronousInitialization determines whether or not the throttler should
- // synchronously fetch credits from the agent when an operation is seen for
- // the first time. This should be set to true if the client will be used by
- // a short lived service that needs to ensure that credits are fetched
- // upfront such that sampling or throttling occurs.
- SynchronousInitialization bool `yaml:"synchronousInitialization"`
-}
-
-type nullCloser struct{}
-
-func (*nullCloser) Close() error { return nil }
-
-// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
-// before shutdown.
-//
-// Deprecated: use NewTracer() function
-func (c Configuration) New(
- serviceName string,
- options ...Option,
-) (opentracing.Tracer, io.Closer, error) {
- if serviceName != "" {
- c.ServiceName = serviceName
- }
-
- return c.NewTracer(options...)
-}
-
-// NewTracer returns a new tracer based on the current configuration, using the given options,
-// and a closer func that can be used to flush buffers before shutdown.
-func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
- if c.Disabled {
- return &opentracing.NoopTracer{}, &nullCloser{}, nil
- }
-
- if c.ServiceName == "" {
- return nil, nil, errors.New("no service name provided")
- }
-
- opts := applyOptions(options...)
- tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
- if c.RPCMetrics {
- Observer(
- rpcmetrics.NewObserver(
- opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}),
- rpcmetrics.DefaultNameNormalizer,
- ),
- )(&opts) // adds to c.observers
- }
- if c.Sampler == nil {
- c.Sampler = &SamplerConfig{
- Type: jaeger.SamplerTypeRemote,
- Param: defaultSamplingProbability,
- }
- }
- if c.Reporter == nil {
- c.Reporter = &ReporterConfig{}
- }
-
- sampler := opts.sampler
- if sampler == nil {
- s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
- if err != nil {
- return nil, nil, err
- }
- sampler = s
- }
-
- reporter := opts.reporter
- if reporter == nil {
- r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
- if err != nil {
- return nil, nil, err
- }
- reporter = r
- }
-
- tracerOptions := []jaeger.TracerOption{
- jaeger.TracerOptions.Metrics(tracerMetrics),
- jaeger.TracerOptions.Logger(opts.logger),
- jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
- jaeger.TracerOptions.PoolSpans(opts.poolSpans),
- jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
- jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
- jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling),
- }
-
- if c.Gen128Bit || opts.gen128Bit {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Gen128Bit(true))
- }
-
- if opts.randomNumber != nil {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.RandomNumber(opts.randomNumber))
- }
-
- for _, tag := range opts.tags {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
- }
-
- for _, tag := range c.Tags {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
- }
-
- for _, obs := range opts.observers {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
- }
-
- for _, cobs := range opts.contribObservers {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
- }
-
- for format, injector := range opts.injectors {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
- }
-
- for format, extractor := range opts.extractors {
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
- }
-
- if c.BaggageRestrictions != nil {
- mgr := remote.NewRestrictionManager(
- c.ServiceName,
- remote.Options.Metrics(tracerMetrics),
- remote.Options.Logger(opts.logger),
- remote.Options.HostPort(c.BaggageRestrictions.HostPort),
- remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
- remote.Options.DenyBaggageOnInitializationFailure(
- c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
- ),
- )
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
- }
-
- if c.Throttler != nil {
- debugThrottler := throttler.NewThrottler(
- c.ServiceName,
- throttler.Options.Metrics(tracerMetrics),
- throttler.Options.Logger(opts.logger),
- throttler.Options.HostPort(c.Throttler.HostPort),
- throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
- throttler.Options.SynchronousInitialization(
- c.Throttler.SynchronousInitialization,
- ),
- )
-
- tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
- }
-
- tracer, closer := jaeger.NewTracer(
- c.ServiceName,
- sampler,
- reporter,
- tracerOptions...,
- )
-
- return tracer, closer, nil
-}
-
-// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
-// It returns a closer func that can be used to flush buffers before shutdown.
-func (c Configuration) InitGlobalTracer(
- serviceName string,
- options ...Option,
-) (io.Closer, error) {
- if c.Disabled {
- return &nullCloser{}, nil
- }
- tracer, closer, err := c.New(serviceName, options...)
- if err != nil {
- return nil, err
- }
- opentracing.SetGlobalTracer(tracer)
- return closer, nil
-}
-
-// NewSampler creates a new sampler based on the configuration
-func (sc *SamplerConfig) NewSampler(
- serviceName string,
- metrics *jaeger.Metrics,
-) (jaeger.Sampler, error) {
- samplerType := strings.ToLower(sc.Type)
- if samplerType == jaeger.SamplerTypeConst {
- return jaeger.NewConstSampler(sc.Param != 0), nil
- }
- if samplerType == jaeger.SamplerTypeProbabilistic {
- if sc.Param >= 0 && sc.Param <= 1.0 {
- return jaeger.NewProbabilisticSampler(sc.Param)
- }
- return nil, fmt.Errorf(
- "invalid Param for probabilistic sampler; expecting value between 0 and 1, received %v",
- sc.Param,
- )
- }
- if samplerType == jaeger.SamplerTypeRateLimiting {
- return jaeger.NewRateLimitingSampler(sc.Param), nil
- }
- if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
- sc2 := *sc
- sc2.Type = jaeger.SamplerTypeProbabilistic
- initSampler, err := sc2.NewSampler(serviceName, nil)
- if err != nil {
- return nil, err
- }
- options := []jaeger.SamplerOption{
- jaeger.SamplerOptions.Metrics(metrics),
- jaeger.SamplerOptions.InitialSampler(initSampler),
- jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
- jaeger.SamplerOptions.MaxOperations(sc.MaxOperations),
- jaeger.SamplerOptions.OperationNameLateBinding(sc.OperationNameLateBinding),
- jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval),
- }
- options = append(options, sc.Options...)
- return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
- }
- return nil, fmt.Errorf("unknown sampler type (%s)", sc.Type)
-}
-
-// NewReporter instantiates a new reporter that submits spans to the collector
-func (rc *ReporterConfig) NewReporter(
- serviceName string,
- metrics *jaeger.Metrics,
- logger jaeger.Logger,
-) (jaeger.Reporter, error) {
- sender, err := rc.newTransport(logger)
- if err != nil {
- return nil, err
- }
- reporter := jaeger.NewRemoteReporter(
- sender,
- jaeger.ReporterOptions.QueueSize(rc.QueueSize),
- jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
- jaeger.ReporterOptions.Logger(logger),
- jaeger.ReporterOptions.Metrics(metrics))
- if rc.LogSpans && logger != nil {
- logger.Infof("Initializing logging reporter")
- reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
- }
- return reporter, err
-}
-
-func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) {
- switch {
- case rc.CollectorEndpoint != "":
- httpOptions := []transport.HTTPOption{transport.HTTPHeaders(rc.HTTPHeaders)}
- if rc.User != "" && rc.Password != "" {
- httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password))
- }
- return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil
- default:
- return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
- AgentClientUDPParams: utils.AgentClientUDPParams{
- HostPort: rc.LocalAgentHostPort,
- Logger: logger,
- DisableAttemptReconnecting: rc.DisableAttemptReconnecting,
- AttemptReconnectInterval: rc.AttemptReconnectInterval,
- },
- })
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
deleted file mode 100644
index 0fc3c53fd3..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
- "fmt"
- "net/url"
- "os"
- "strconv"
- "strings"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/pkg/errors"
- "github.com/uber/jaeger-client-go"
-)
-
-const (
- // environment variable names
- envServiceName = "JAEGER_SERVICE_NAME"
- envDisabled = "JAEGER_DISABLED"
- envRPCMetrics = "JAEGER_RPC_METRICS"
- envTags = "JAEGER_TAGS"
- envSamplerType = "JAEGER_SAMPLER_TYPE"
- envSamplerParam = "JAEGER_SAMPLER_PARAM"
- envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
- envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT"
- envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
- envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
- envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
- envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
- envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
- envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED"
- envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL"
- envEndpoint = "JAEGER_ENDPOINT"
- envUser = "JAEGER_USER"
- envPassword = "JAEGER_PASSWORD"
- envAgentHost = "JAEGER_AGENT_HOST"
- envAgentPort = "JAEGER_AGENT_PORT"
- env128bit = "JAEGER_TRACEID_128BIT"
-)
-
-// FromEnv uses environment variables to set the tracer's Configuration
-func FromEnv() (*Configuration, error) {
- c := &Configuration{}
- return c.FromEnv()
-}
-
-// FromEnv uses environment variables and overrides existing tracer's Configuration
-func (c *Configuration) FromEnv() (*Configuration, error) {
- if e := os.Getenv(envServiceName); e != "" {
- c.ServiceName = e
- }
-
- if e := os.Getenv(envRPCMetrics); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- c.RPCMetrics = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
- }
- }
-
- if e := os.Getenv(envDisabled); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- c.Disabled = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
- }
- }
-
- if e := os.Getenv(envTags); e != "" {
- c.Tags = parseTags(e)
- }
-
- if e := os.Getenv(env128bit); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- c.Gen128Bit = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", env128bit, e)
- }
- }
-
- if c.Sampler == nil {
- c.Sampler = &SamplerConfig{}
- }
-
- if s, err := c.Sampler.samplerConfigFromEnv(); err == nil {
- c.Sampler = s
- } else {
- return nil, errors.Wrap(err, "cannot obtain sampler config from env")
- }
-
- if c.Reporter == nil {
- c.Reporter = &ReporterConfig{}
- }
-
- if r, err := c.Reporter.reporterConfigFromEnv(); err == nil {
- c.Reporter = r
- } else {
- return nil, errors.Wrap(err, "cannot obtain reporter config from env")
- }
-
- return c, nil
-}
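-
-// Illustrative usage (a sketch, not part of the original file; the service
-// name and env value are hypothetical):
-//
-//	os.Setenv("JAEGER_SERVICE_NAME", "my-service")
-//	cfg, err := config.FromEnv()
-//	if err != nil {
-//		// malformed env vars surface here as wrapped parse errors
-//	}
-//	tracer, closer, err := cfg.NewTracer()
-//	defer closer.Close()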
-
-// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
-func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
- if e := os.Getenv(envSamplerType); e != "" {
- sc.Type = e
- }
-
- if e := os.Getenv(envSamplerParam); e != "" {
- if value, err := strconv.ParseFloat(e, 64); err == nil {
- sc.Param = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
- }
- }
-
- if e := os.Getenv(envSamplingEndpoint); e != "" {
- sc.SamplingServerURL = e
- } else if e := os.Getenv(envSamplerManagerHostPort); e != "" {
- sc.SamplingServerURL = e
- } else if e := os.Getenv(envAgentHost); e != "" {
- // Fallback if we know the agent host - try the sampling endpoint there
- sc.SamplingServerURL = fmt.Sprintf("http://%s:%d/sampling", e, jaeger.DefaultSamplingServerPort)
- }
-
- if e := os.Getenv(envSamplerMaxOperations); e != "" {
- if value, err := strconv.ParseInt(e, 10, 0); err == nil {
- sc.MaxOperations = int(value)
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
- }
- }
-
- if e := os.Getenv(envSamplerRefreshInterval); e != "" {
- if value, err := time.ParseDuration(e); err == nil {
- sc.SamplingRefreshInterval = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
- }
- }
-
- return sc, nil
-}
-
-// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
-func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
- if e := os.Getenv(envReporterMaxQueueSize); e != "" {
- if value, err := strconv.ParseInt(e, 10, 0); err == nil {
- rc.QueueSize = int(value)
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
- }
- }
-
- if e := os.Getenv(envReporterFlushInterval); e != "" {
- if value, err := time.ParseDuration(e); err == nil {
- rc.BufferFlushInterval = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
- }
- }
-
- if e := os.Getenv(envReporterLogSpans); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- rc.LogSpans = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
- }
- }
-
- if e := os.Getenv(envEndpoint); e != "" {
- u, err := url.ParseRequestURI(e)
- if err != nil {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e)
- }
- rc.CollectorEndpoint = u.String()
- user := os.Getenv(envUser)
- pswd := os.Getenv(envPassword)
- if user != "" && pswd == "" || user == "" && pswd != "" {
- return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword)
- }
- rc.User = user
- rc.Password = pswd
- } else {
- useEnv := false
- host := jaeger.DefaultUDPSpanServerHost
- if e := os.Getenv(envAgentHost); e != "" {
- host = e
- useEnv = true
- }
-
- port := jaeger.DefaultUDPSpanServerPort
- if e := os.Getenv(envAgentPort); e != "" {
- if value, err := strconv.ParseInt(e, 10, 0); err == nil {
- port = int(value)
- useEnv = true
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
- }
- }
- if useEnv || rc.LocalAgentHostPort == "" {
- rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
- }
-
- if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" {
- if value, err := strconv.ParseBool(e); err == nil {
- rc.DisableAttemptReconnecting = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e)
- }
- }
-
- if !rc.DisableAttemptReconnecting {
- if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" {
- if value, err := time.ParseDuration(e); err == nil {
- rc.AttemptReconnectInterval = value
- } else {
- return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e)
- }
- }
- }
- }
-
- return rc, nil
-}
-
-// parseTags parses the given string into a collection of Tags.
-// Spec for this value:
-// - comma separated list of key=value
-// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
-// is an environment variable and `defaultValue` is the value to use in case the env var is not set
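-// For example (illustrative values, not from the original source):
-//
-//	JAEGER_TAGS=team=backend,host=${HOSTNAME:unknown}
-//
-// yields the tag team=backend, plus host set from $HOSTNAME with "unknown"
-// used when that variable is empty or unset.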
-func parseTags(sTags string) []opentracing.Tag {
- pairs := strings.Split(sTags, ",")
- tags := make([]opentracing.Tag, 0)
- for _, p := range pairs {
- kv := strings.SplitN(p, "=", 2)
- k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
-
- if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
- ed := strings.SplitN(v[2:len(v)-1], ":", 2)
- e, d := ed[0], ed[1]
- v = os.Getenv(e)
- if v == "" && d != "" {
- v = d
- }
- }
-
- tag := opentracing.Tag{Key: k, Value: v}
- tags = append(tags, tag)
- }
-
- return tags
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go
deleted file mode 100644
index a2b9cbc28b..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/config/options.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package config
-
-import (
- opentracing "github.com/opentracing/opentracing-go"
- "github.com/uber/jaeger-lib/metrics"
-
- "github.com/uber/jaeger-client-go"
-)
-
-// Option is a function that sets some option on the client.
-type Option func(c *Options)
-
-// Options control behavior of the client.
-type Options struct {
- metrics metrics.Factory
- logger jaeger.Logger
- reporter jaeger.Reporter
- sampler jaeger.Sampler
- contribObservers []jaeger.ContribObserver
- observers []jaeger.Observer
- gen128Bit bool
- poolSpans bool
- zipkinSharedRPCSpan bool
- maxTagValueLength int
- noDebugFlagOnForcedSampling bool
- tags []opentracing.Tag
- injectors map[interface{}]jaeger.Injector
- extractors map[interface{}]jaeger.Extractor
- randomNumber func() uint64
-}
-
-// Metrics creates an Option that initializes Metrics in the tracer,
-// which is used to emit statistics about spans.
-func Metrics(factory metrics.Factory) Option {
- return func(c *Options) {
- c.metrics = factory
- }
-}
-
-// Logger can be provided to log Reporter errors, as well as to log spans
-// if Reporter.LogSpans is set to true.
-func Logger(logger jaeger.Logger) Option {
- return func(c *Options) {
- c.logger = logger
- }
-}
-
-// Reporter can be provided explicitly to override the configuration.
-// Useful for testing, e.g. by passing InMemoryReporter.
-func Reporter(reporter jaeger.Reporter) Option {
- return func(c *Options) {
- c.reporter = reporter
- }
-}
-
-// Sampler can be provided explicitly to override the configuration.
-func Sampler(sampler jaeger.Sampler) Option {
- return func(c *Options) {
- c.sampler = sampler
- }
-}
-
-// Observer can be registered with the Tracer to receive notifications about new Spans.
-func Observer(observer jaeger.Observer) Option {
- return func(c *Options) {
- c.observers = append(c.observers, observer)
- }
-}
-
-// ContribObserver can be registered with the Tracer to receive notifications
-// about new spans.
-func ContribObserver(observer jaeger.ContribObserver) Option {
- return func(c *Options) {
- c.contribObservers = append(c.contribObservers, observer)
- }
-}
-
-// Gen128Bit specifies whether to generate 128bit trace IDs.
-func Gen128Bit(gen128Bit bool) Option {
- return func(c *Options) {
- c.gen128Bit = gen128Bit
- }
-}
-
-// PoolSpans specifies whether to pool spans.
-func PoolSpans(poolSpans bool) Option {
- return func(c *Options) {
- c.poolSpans = poolSpans
- }
-}
-
-// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
-// and server spans a la zipkin. If false, client and server spans will be assigned
-// different IDs.
-func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
- return func(c *Options) {
- c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
- }
-}
-
-// MaxTagValueLength can be provided to override the default max tag value length.
-func MaxTagValueLength(maxTagValueLength int) Option {
- return func(c *Options) {
- c.maxTagValueLength = maxTagValueLength
- }
-}
-
-// NoDebugFlagOnForcedSampling can be used to decide whether the debug flag will be set
-// when calling span.setSamplingPriority to force-sample a span.
-func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option {
- return func(c *Options) {
- c.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
- }
-}
-
-// Tag creates an option that adds a tracer-level tag.
-func Tag(key string, value interface{}) Option {
- return func(c *Options) {
- c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
- }
-}
-
-// Injector registers an Injector with the given format.
-func Injector(format interface{}, injector jaeger.Injector) Option {
- return func(c *Options) {
- c.injectors[format] = injector
- }
-}
-
-// Extractor registers an Extractor with the given format.
-func Extractor(format interface{}, extractor jaeger.Extractor) Option {
- return func(c *Options) {
- c.extractors[format] = extractor
- }
-}
-
-// WithRandomNumber supplies a random number generator function to the Tracer used to generate trace and span IDs.
-func WithRandomNumber(f func() uint64) Option {
- return func(c *Options) {
- c.randomNumber = f
- }
-}
-
-func applyOptions(options ...Option) Options {
- opts := Options{
- injectors: make(map[interface{}]jaeger.Injector),
- extractors: make(map[interface{}]jaeger.Extractor),
- }
- for _, option := range options {
- option(&opts)
- }
- if opts.metrics == nil {
- opts.metrics = metrics.NullFactory
- }
- if opts.logger == nil {
- opts.logger = jaeger.NullLogger
- }
- return opts
-}
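-
-// Illustrative call site (a sketch, not part of the original file; cfg and
-// metricsFactory are hypothetical, the option constructors are defined above):
-//
-//	tracer, closer, err := cfg.NewTracer(
-//		config.Metrics(metricsFactory),
-//		config.Logger(jaeger.NullLogger),
-//		config.MaxTagValueLength(512),
-//	)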
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
deleted file mode 100644
index 35710cfef6..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
-
- "github.com/opentracing/opentracing-go"
-)
-
-const (
- // JaegerClientVersion is the version of the client library reported as Span tag.
- JaegerClientVersion = "Go-2.30.0"
-
- // JaegerClientVersionTagKey is the name of the tag used to report client version.
- JaegerClientVersionTagKey = "jaeger.version"
-
-	// JaegerDebugHeader is the name of the HTTP header or TextMap carrier key which,
-	// if found in the carrier, forces the trace to be sampled as a "debug" trace.
-	// The value of the header is recorded as a tag on the root span, so that the
-	// trace can be found in the UI using this value as a correlation ID.
- JaegerDebugHeader = "jaeger-debug-id"
-
- // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
- // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
- // a root span does not exist.
- JaegerBaggageHeader = "jaeger-baggage"
-
- // TracerHostnameTagKey used to report host name of the process.
- TracerHostnameTagKey = "hostname"
-
- // TracerIPTagKey used to report ip of the process.
- TracerIPTagKey = "ip"
-
- // TracerUUIDTagKey used to report UUID of the client process.
- TracerUUIDTagKey = "client-uuid"
-
- // SamplerTypeTagKey reports which sampler was used on the root span.
- SamplerTypeTagKey = "sampler.type"
-
- // SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
- SamplerParamTagKey = "sampler.param"
-
- // TraceContextHeaderName is the http header name used to propagate tracing context.
- // This must be in lower-case to avoid mismatches when decoding incoming headers.
- TraceContextHeaderName = "uber-trace-id"
-
- // TracerStateHeaderName is deprecated.
- // Deprecated: use TraceContextHeaderName
- TracerStateHeaderName = TraceContextHeaderName
-
- // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
- // This must be in lower-case to avoid mismatches when decoding incoming headers.
- TraceBaggageHeaderPrefix = "uberctx-"
-
- // SamplerTypeConst is the type of sampler that always makes the same decision.
- SamplerTypeConst = "const"
-
- // SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
- SamplerTypeRemote = "remote"
-
- // SamplerTypeProbabilistic is the type of sampler that samples traces
- // with a certain fixed probability.
- SamplerTypeProbabilistic = "probabilistic"
-
- // SamplerTypeRateLimiting is the type of sampler that samples
- // only up to a fixed number of traces per second.
- SamplerTypeRateLimiting = "ratelimiting"
-
- // SamplerTypeLowerBound is the type of sampler that samples
- // at least a fixed number of traces per second.
- SamplerTypeLowerBound = "lowerbound"
-
- // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
- DefaultUDPSpanServerHost = "localhost"
-
- // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
- DefaultUDPSpanServerPort = 6831
-
- // DefaultSamplingServerPort is the default port to fetch sampling config from, via http
- DefaultSamplingServerPort = 5778
-
- // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
- DefaultMaxTagValueLength = 256
-
- // SelfRefType is a jaeger specific reference type that supports creating a span
- // with an already defined context.
- selfRefType opentracing.SpanReferenceType = 99
-)
-
-var (
- // DefaultSamplingServerURL is the default url to fetch sampling config from, via http
- DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort)
-)
diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
deleted file mode 100644
index 4ce1881f3b..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- opentracing "github.com/opentracing/opentracing-go"
-)
-
-// ContribObserver can be registered with the Tracer to receive notifications
-// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
-type ContribObserver interface {
- // Create and return a span observer. Called when a span starts.
- // If the Observer is not interested in the given span, it must return (nil, false).
-	// E.g.:
-	//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
-	//         var sp opentracing.Span
-	//         sso := opentracing.StartSpanOptions{}
-	//         if spanObserver, ok := observer.OnStartSpan(sp, opName, sso); ok {
- // // we have a valid SpanObserver
- // }
- // ...
- // }
- OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
-}
-
-// ContribSpanObserver is created by the Observer and receives notifications
-// about other Span events. This interface is meant to match
-// github.com/opentracing-contrib/go-observer, via duck typing, without
-// directly importing the go-observer package.
-type ContribSpanObserver interface {
- OnSetOperationName(operationName string)
- OnSetTag(key string, value interface{})
- OnFinish(options opentracing.FinishOptions)
-}
-
-// wrapper observer for the old observers (see observer.go)
-type oldObserver struct {
- obs Observer
-}
-
-func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
- spanObserver := o.obs.OnStartSpan(operationName, options)
- return spanObserver, spanObserver != nil
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
deleted file mode 100644
index fac3c09f9f..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
-
-For integration instructions please refer to the README:
-
-https://github.com/uber/jaeger-client-go/blob/master/README.md
-*/
-package jaeger
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock
deleted file mode 100644
index c1ec339258..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/glide.lock
+++ /dev/null
@@ -1,105 +0,0 @@
-hash: 63bec420a22b7e5abac8c602c5cc9b66a33d6a1bfec8918eecc77fd344b759ed
-updated: 2020-07-31T13:30:37.242608-04:00
-imports:
-- name: github.com/beorn7/perks
- version: 3a771d992973f24aa725d07868b467d1ddfceafb
- subpackages:
- - quantile
-- name: github.com/HdrHistogram/hdrhistogram-go
- version: 3a0bb77429bd3a61596f5e8a3172445844342120
-- name: github.com/crossdock/crossdock-go
- version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
- subpackages:
- - assert
- - require
-- name: github.com/davecgh/go-spew
- version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73
- subpackages:
- - spew
-- name: github.com/golang/mock
- version: 51421b967af1f557f93a59e0057aaf15ca02e29c
- subpackages:
- - gomock
-- name: github.com/golang/protobuf
- version: b5d812f8a3706043e23a9cd5babf2e5423744d30
- subpackages:
- - proto
-- name: github.com/matttproud/golang_protobuf_extensions
- version: c182affec369e30f25d3eb8cd8a478dee585ae7d
- subpackages:
- - pbutil
-- name: github.com/opentracing/opentracing-go
- version: d34af3eaa63c4d08ab54863a4bdd0daa45212e12
- subpackages:
- - ext
- - harness
- - log
-- name: github.com/pkg/errors
- version: ba968bfe8b2f7e042a574c888954fccecfa385b4
-- name: github.com/pmezard/go-difflib
- version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
- subpackages:
- - difflib
-- name: github.com/prometheus/client_golang
- version: 170205fb58decfd011f1550d4cfb737230d7ae4f
- subpackages:
- - prometheus
- - prometheus/internal
-- name: github.com/prometheus/client_model
- version: fd36f4220a901265f90734c3183c5f0c91daa0b8
- subpackages:
- - go
-- name: github.com/prometheus/common
- version: 1ab4d74fc89940cfbc3c2b3a89821336cdefa119
- subpackages:
- - expfmt
- - internal/bitbucket.org/ww/goautoneg
- - model
-- name: github.com/prometheus/procfs
- version: 8a055596020d692cf491851e47ba3e302d9f90ce
- subpackages:
- - internal/fs
- - internal/util
-- name: github.com/stretchr/testify
- version: f654a9112bbeac49ca2cd45bfbe11533c4666cf8
- subpackages:
- - assert
- - mock
- - require
- - suite
-- name: github.com/uber-go/atomic
- version: 845920076a298bdb984fb0f1b86052e4ca0a281c
-- name: github.com/uber/jaeger-lib
- version: 48cc1df63e6be0d63b95677f0d22beb880bce1e4
- subpackages:
- - metrics
- - metrics/metricstest
- - metrics/prometheus
-- name: go.uber.org/atomic
- version: 845920076a298bdb984fb0f1b86052e4ca0a281c
-- name: go.uber.org/multierr
- version: b587143a48b62b01d337824eab43700af6ffe222
-- name: go.uber.org/zap
- version: feeb9a050b31b40eec6f2470e7599eeeadfe5bdd
- subpackages:
- - buffer
- - internal/bufferpool
- - internal/color
- - internal/exit
- - zapcore
- - zaptest/observer
-- name: golang.org/x/net
- version: addf6b3196f61cd44ce5a76657913698c73479d0
- subpackages:
- - context
- - context/ctxhttp
-- name: golang.org/x/sys
- version: 3e129f6d46b10b0e1da36b3deffcb55e09631b64
- subpackages:
- - internal/unsafeheader
- - windows
-- name: gopkg.in/yaml.v3
- version: eeeca48fe7764f320e4870d231902bf9c1be2c08
-testImports:
-- name: github.com/stretchr/objx
- version: 35313a95ee26395aa17d366c71a2ccf788fa69b6
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml
deleted file mode 100644
index 295678c910..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/glide.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-package: github.com/uber/jaeger-client-go
-import:
-- package: github.com/opentracing/opentracing-go
- version: ^1.2
- subpackages:
- - ext
- - log
-- package: github.com/crossdock/crossdock-go
-- package: github.com/uber/jaeger-lib
- version: ^2.3.0
- subpackages:
- - metrics
-- package: github.com/pkg/errors
- version: ~0.8.0
-- package: go.uber.org/zap
- source: https://github.com/uber-go/zap.git
- version: ^1
-- package: github.com/uber-go/atomic
- version: ^1
-- package: github.com/prometheus/client_golang
- version: 1.1
-- package: github.com/prometheus/procfs
- version: 0.0.6
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
- - require
- - suite
-- package: github.com/golang/mock
diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go
deleted file mode 100644
index 5da70351d9..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/header.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-// HeadersConfig contains the values for the header keys that Jaeger will use.
-// These values may be either custom or default depending on whether custom
-// values were provided via a configuration.
-type HeadersConfig struct {
-	// JaegerDebugHeader is the name of the HTTP header or TextMap carrier key which,
-	// if found in the carrier, forces the trace to be sampled as a "debug" trace.
-	// The value of the header is recorded as a tag on the root span, so that the
-	// trace can be found in the UI using this value as a correlation ID.
- JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
-
- // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
- // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
- // a root span does not exist.
- JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
-
- // TraceContextHeaderName is the http header name used to propagate tracing context.
- // This must be in lower-case to avoid mismatches when decoding incoming headers.
- TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
-
- // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
- // This must be in lower-case to avoid mismatches when decoding incoming headers.
- TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
-}
-
-// ApplyDefaults sets missing configuration keys to default values
-func (c *HeadersConfig) ApplyDefaults() *HeadersConfig {
- if c.JaegerBaggageHeader == "" {
- c.JaegerBaggageHeader = JaegerBaggageHeader
- }
- if c.JaegerDebugHeader == "" {
- c.JaegerDebugHeader = JaegerDebugHeader
- }
- if c.TraceBaggageHeaderPrefix == "" {
- c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
- }
- if c.TraceContextHeaderName == "" {
- c.TraceContextHeaderName = TraceContextHeaderName
- }
- return c
-}
-
-func getDefaultHeadersConfig() *HeadersConfig {
- return &HeadersConfig{
- JaegerDebugHeader: JaegerDebugHeader,
- JaegerBaggageHeader: JaegerBaggageHeader,
- TraceContextHeaderName: TraceContextHeaderName,
- TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
- }
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
deleted file mode 100644
index 745729319f..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package remote
-
-import (
- "time"
-
- "github.com/uber/jaeger-client-go"
-)
-
-const (
- defaultMaxValueLength = 2048
- defaultRefreshInterval = time.Minute
- defaultHostPort = "localhost:5778"
-)
-
-// Option is a function that sets some option on the RestrictionManager
-type Option func(options *options)
-
-// Options is a factory for all available options
-var Options options
-
-type options struct {
- denyBaggageOnInitializationFailure bool
- metrics *jaeger.Metrics
- logger jaeger.Logger
- hostPort string
- refreshInterval time.Duration
-}
-
-// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
-// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage
-// restrictions have been retrieved from agent.
-// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage
-// restrictions have been retrieved from agent.
-func (options) DenyBaggageOnInitializationFailure(b bool) Option {
- return func(o *options) {
- o.denyBaggageOnInitializationFailure = b
- }
-}
-
-// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
-func (options) Metrics(m *jaeger.Metrics) Option {
- return func(o *options) {
- o.metrics = m
- }
-}
-
-// Logger creates an Option that sets the logger used by the RestrictionManager.
-func (options) Logger(logger jaeger.Logger) Option {
- return func(o *options) {
- o.logger = logger
- }
-}
-
-// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
-func (options) HostPort(hostPort string) Option {
- return func(o *options) {
- o.hostPort = hostPort
- }
-}
-
-// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
-// the baggage restrictions.
-func (options) RefreshInterval(refreshInterval time.Duration) Option {
- return func(o *options) {
- o.refreshInterval = refreshInterval
- }
-}
-
-func applyOptions(o ...Option) options {
- opts := options{}
- for _, option := range o {
- option(&opts)
- }
- if opts.metrics == nil {
- opts.metrics = jaeger.NewNullMetrics()
- }
- if opts.logger == nil {
- opts.logger = jaeger.NullLogger
- }
- if opts.hostPort == "" {
- opts.hostPort = defaultHostPort
- }
- if opts.refreshInterval == 0 {
- opts.refreshInterval = defaultRefreshInterval
- }
- return opts
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
deleted file mode 100644
index 2f58bb541a..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package remote
-
-import (
- "context"
- "fmt"
- "net/url"
- "sync"
- "time"
-
- "github.com/uber/jaeger-client-go/internal/baggage"
- thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-type httpBaggageRestrictionManagerProxy struct {
- url string
-}
-
-func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
- v := url.Values{}
- v.Set("service", serviceName)
- return &httpBaggageRestrictionManagerProxy{
- url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
- }
-}
-
-func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(context.Context, string) ([]*thrift.BaggageRestriction, error) {
- var out []*thrift.BaggageRestriction
- if err := utils.GetJSON(s.url, &out); err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// RestrictionManager manages baggage restrictions by polling the agent for the latest restrictions.
-type RestrictionManager struct {
- options
-
- mux sync.RWMutex
- serviceName string
- restrictions map[string]*baggage.Restriction
- thriftProxy thrift.BaggageRestrictionManager
- pollStopped sync.WaitGroup
- stopPoll chan struct{}
- invalidRestriction *baggage.Restriction
- validRestriction *baggage.Restriction
-
- // Determines if the manager has successfully retrieved baggage restrictions from agent
- initialized bool
-}
-
-// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest
-// baggage restrictions.
-func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
- // TODO there is a developing use case where a single tracer can generate traces on behalf of many services.
- // restrictionsMap will need to exist per service
- opts := applyOptions(options...)
- m := &RestrictionManager{
- serviceName: serviceName,
- options: opts,
- restrictions: make(map[string]*baggage.Restriction),
- thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
- stopPoll: make(chan struct{}),
- invalidRestriction: baggage.NewRestriction(false, 0),
- validRestriction: baggage.NewRestriction(true, defaultMaxValueLength),
- }
- m.pollStopped.Add(1)
- go m.pollManager()
- return m
-}
-
-// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
-func (m *RestrictionManager) isReady() bool {
- m.mux.RLock()
- defer m.mux.RUnlock()
- return m.initialized
-}
-
-// GetRestriction implements RestrictionManager#GetRestriction.
-func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
- m.mux.RLock()
- defer m.mux.RUnlock()
- if !m.initialized {
- if m.denyBaggageOnInitializationFailure {
- return m.invalidRestriction
- }
- return m.validRestriction
- }
- if restriction, ok := m.restrictions[key]; ok {
- return restriction
- }
- return m.invalidRestriction
-}
-
-// Close stops remote polling and closes the RemoteRestrictionManager.
-func (m *RestrictionManager) Close() error {
- close(m.stopPoll)
- m.pollStopped.Wait()
- return nil
-}
-
-func (m *RestrictionManager) pollManager() {
- defer m.pollStopped.Done()
- // attempt to initialize baggage restrictions
- if err := m.updateRestrictions(); err != nil {
- m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
- }
- ticker := time.NewTicker(m.refreshInterval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- if err := m.updateRestrictions(); err != nil {
- m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
- }
- case <-m.stopPoll:
- return
- }
- }
-}
-
-func (m *RestrictionManager) updateRestrictions() error {
- restrictions, err := m.thriftProxy.GetBaggageRestrictions(context.Background(), m.serviceName)
- if err != nil {
- m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
- return err
- }
- newRestrictions := m.parseRestrictions(restrictions)
- m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
- m.mux.Lock()
- defer m.mux.Unlock()
- m.initialized = true
- m.restrictions = newRestrictions
- return nil
-}
-
-func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
- setters := make(map[string]*baggage.Restriction, len(restrictions))
- for _, restriction := range restrictions {
- setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
- }
- return setters
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
deleted file mode 100644
index c16a5c5662..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package baggage
-
-const (
- defaultMaxValueLength = 2048
-)
-
-// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
-type Restriction struct {
- keyAllowed bool
- maxValueLength int
-}
-
-// NewRestriction returns a new Restriction.
-func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
- return &Restriction{
- keyAllowed: keyAllowed,
- maxValueLength: maxValueLength,
- }
-}
-
-// KeyAllowed returns whether the baggage key for this restriction is allowed.
-func (r *Restriction) KeyAllowed() bool {
- return r.keyAllowed
-}
-
-// MaxValueLength returns the max length for the baggage value.
-func (r *Restriction) MaxValueLength() int {
- return r.maxValueLength
-}
-
-// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
-// will return a Restriction for a specific baggage key which will determine whether the baggage
-// key is allowed for the current service and any other applicable restrictions on the baggage
-// value.
-type RestrictionManager interface {
- GetRestriction(service, key string) *Restriction
-}
-
-// DefaultRestrictionManager allows any baggage key.
-type DefaultRestrictionManager struct {
- defaultRestriction *Restriction
-}
-
-// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
-func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
- if maxValueLength == 0 {
- maxValueLength = defaultMaxValueLength
- }
- return &DefaultRestrictionManager{
- defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
- }
-}
-
-// GetRestriction implements RestrictionManager#GetRestriction.
-func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
- return m.defaultRestriction
-}
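-
-// Illustrative check (a sketch of a call site, not part of the original file;
-// mgr, key, and value are hypothetical):
-//
-//	r := mgr.GetRestriction("my-service", key)
-//	if r.KeyAllowed() && len(value) <= r.MaxValueLength() {
-//		// the baggage item fits within the restriction
-//	}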
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
deleted file mode 100644
index fe0bef268a..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) 2020 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reporterstats
-
-// ReporterStats exposes some metrics from the RemoteReporter.
-type ReporterStats interface {
- SpansDroppedFromQueue() int64
-}
-
-// Receiver can be implemented by a Transport to be given ReporterStats.
-type Receiver interface {
- SetReporterStats(ReporterStats)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
deleted file mode 100644
index 0e10b8a5aa..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spanlog
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/opentracing/opentracing-go/log"
-)
-
-type fieldsAsMap map[string]string
-
-// MaterializeWithJSON converts log Fields into a JSON string.
-// TODO refactor into pluggable materializer
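-// For example (illustrative fields): a lone log.String("event", "timeout")
-// materializes as the raw bytes "timeout", while log.String("key", "v") plus
-// log.Int("n", 1) materialize as {"key":"v","n":"1"}.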
-func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
- fields := fieldsAsMap(make(map[string]string, len(logFields)))
- for _, field := range logFields {
- field.Marshal(fields)
- }
- if event, ok := fields["event"]; ok && len(fields) == 1 {
- return []byte(event), nil
- }
- return json.Marshal(fields)
-}
-
-func (ml fieldsAsMap) EmitString(key, value string) {
- ml[key] = value
-}
-
-func (ml fieldsAsMap) EmitBool(key string, value bool) {
- ml[key] = fmt.Sprintf("%t", value)
-}
-
-func (ml fieldsAsMap) EmitInt(key string, value int) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitInt32(key string, value int32) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitInt64(key string, value int64) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
- ml[key] = fmt.Sprintf("%f", value)
-}
-
-func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
- ml[key] = fmt.Sprintf("%f", value)
-}
-
-func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
- ml[key] = fmt.Sprintf("%+v", value)
-}
-
-func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
- value(ml)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
deleted file mode 100644
index f52c322fb6..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package remote
-
-import (
- "time"
-
- "github.com/uber/jaeger-client-go"
-)
-
-const (
- defaultHostPort = "localhost:5778"
- defaultRefreshInterval = time.Second * 5
-)
-
-// Option is a function that sets some option on the Throttler
-type Option func(options *options)
-
-// Options is a factory for all available options
-var Options options
-
-type options struct {
- metrics *jaeger.Metrics
- logger jaeger.Logger
- hostPort string
- refreshInterval time.Duration
- synchronousInitialization bool
-}
-
-// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
-func (options) Metrics(m *jaeger.Metrics) Option {
- return func(o *options) {
- o.metrics = m
- }
-}
-
-// Logger creates an Option that sets the logger used by the Throttler.
-func (options) Logger(logger jaeger.Logger) Option {
- return func(o *options) {
- o.logger = logger
- }
-}
-
-// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
-func (options) HostPort(hostPort string) Option {
- return func(o *options) {
- o.hostPort = hostPort
- }
-}
-
-// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
-// credits.
-func (options) RefreshInterval(refreshInterval time.Duration) Option {
- return func(o *options) {
- o.refreshInterval = refreshInterval
- }
-}
-
-// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
-// fetch credits from the agent when an operation is seen for the first time. This should be set to true
-// if the client will be used by a short-lived service that needs to ensure that credits are fetched up
-// front so that sampling or throttling can occur.
-func (options) SynchronousInitialization(b bool) Option {
- return func(o *options) {
- o.synchronousInitialization = b
- }
-}
-
-func applyOptions(o ...Option) options {
- opts := options{}
- for _, option := range o {
- option(&opts)
- }
- if opts.metrics == nil {
- opts.metrics = jaeger.NewNullMetrics()
- }
- if opts.logger == nil {
- opts.logger = jaeger.NullLogger
- }
- if opts.hostPort == "" {
- opts.hostPort = defaultHostPort
- }
- if opts.refreshInterval == 0 {
- opts.refreshInterval = defaultRefreshInterval
- }
- return opts
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
deleted file mode 100644
index 20f434fe49..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package remote
-
-import (
- "fmt"
- "net/url"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkg/errors"
-
- "github.com/uber/jaeger-client-go"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-const (
-	// minimumCredits is the minimum amount of credits necessary to not be throttled,
-	// i.e. if currentCredits >= minimumCredits, the operation will not be throttled.
- minimumCredits = 1.0
-)
-
-var (
- errorUUIDNotSet = errors.New("Throttler UUID must be set")
-)
-
-type operationBalance struct {
- Operation string `json:"operation"`
- Balance float64 `json:"balance"`
-}
-
-type creditResponse struct {
- Balances []operationBalance `json:"balances"`
-}
-
-type httpCreditManagerProxy struct {
- hostPort string
-}
-
-func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
- return &httpCreditManagerProxy{
- hostPort: hostPort,
- }
-}
-
-// N.B. Operations list must not be empty.
-func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
- params := url.Values{}
- params.Set("service", serviceName)
- params.Set("uuid", uuid)
- for _, op := range operations {
- params.Add("operations", op)
- }
- var resp creditResponse
- if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
- return nil, errors.Wrap(err, "Failed to receive credits from agent")
- }
- return &resp, nil
-}
-
-// Throttler retrieves credits from the agent and uses them to throttle operations.
-type Throttler struct {
- options
-
- mux sync.RWMutex
- service string
- uuid atomic.Value
- creditManager *httpCreditManagerProxy
- credits map[string]float64 // map of operation->credits
- close chan struct{}
- stopped sync.WaitGroup
-}
-
-// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
-// the service.
-func NewThrottler(service string, options ...Option) *Throttler {
- opts := applyOptions(options...)
- creditManager := newHTTPCreditManagerProxy(opts.hostPort)
- t := &Throttler{
- options: opts,
- creditManager: creditManager,
- service: service,
- credits: make(map[string]float64),
- close: make(chan struct{}),
- }
- t.stopped.Add(1)
- go t.pollManager()
- return t
-}
-
-// IsAllowed implements Throttler#IsAllowed.
-func (t *Throttler) IsAllowed(operation string) bool {
- t.mux.Lock()
- defer t.mux.Unlock()
- value, ok := t.credits[operation]
- if !ok || value == 0 {
- if !ok {
- // NOTE: This appears to be a no-op at first glance, but it stores
- // the operation key in the map. Necessary for functionality of
- // Throttler#operations method.
- t.credits[operation] = 0
- }
- if !t.synchronousInitialization {
- t.metrics.ThrottledDebugSpans.Inc(1)
- return false
- }
- // If it is the first time this operation is being checked, synchronously fetch
- // the credits.
- credits, err := t.fetchCredits([]string{operation})
- if err != nil {
- // Failed to receive credits from agent, try again next time
- t.logger.Error("Failed to fetch credits: " + err.Error())
- return false
- }
- if len(credits.Balances) == 0 {
- // This shouldn't happen but just in case
- return false
- }
- for _, opBalance := range credits.Balances {
- t.credits[opBalance.Operation] += opBalance.Balance
- }
- }
- return t.isAllowed(operation)
-}
-
-// Close stops the throttler from fetching credits from remote.
-func (t *Throttler) Close() error {
- close(t.close)
- t.stopped.Wait()
- return nil
-}
-
-// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
-// requests are made.
-func (t *Throttler) SetProcess(process jaeger.Process) {
- if process.UUID != "" {
- t.uuid.Store(process.UUID)
- }
-}
-
-// N.B. This function must be called with the Write Lock
-func (t *Throttler) isAllowed(operation string) bool {
- credits := t.credits[operation]
- if credits < minimumCredits {
- t.metrics.ThrottledDebugSpans.Inc(1)
- return false
- }
- t.credits[operation] = credits - minimumCredits
- return true
-}
-
-func (t *Throttler) pollManager() {
- defer t.stopped.Done()
- ticker := time.NewTicker(t.refreshInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- t.refreshCredits()
- case <-t.close:
- return
- }
- }
-}
-
-func (t *Throttler) operations() []string {
- t.mux.RLock()
- defer t.mux.RUnlock()
- operations := make([]string, 0, len(t.credits))
- for op := range t.credits {
- operations = append(operations, op)
- }
- return operations
-}
-
-func (t *Throttler) refreshCredits() {
- operations := t.operations()
- if len(operations) == 0 {
- return
- }
- newCredits, err := t.fetchCredits(operations)
- if err != nil {
- t.metrics.ThrottlerUpdateFailure.Inc(1)
- t.logger.Error("Failed to fetch credits: " + err.Error())
- return
- }
- t.metrics.ThrottlerUpdateSuccess.Inc(1)
-
- t.mux.Lock()
- defer t.mux.Unlock()
- for _, opBalance := range newCredits.Balances {
- t.credits[opBalance.Operation] += opBalance.Balance
- }
-}
-
-func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
- uuid := t.uuid.Load()
- uuidStr, _ := uuid.(string)
- if uuid == nil || uuidStr == "" {
- return nil, errorUUIDNotSet
- }
- return t.creditManager.FetchCredits(uuidStr, t.service, operations)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
deleted file mode 100644
index 196ed69cac..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package throttler
-
-// Throttler is used to rate-limit operations. For example, given that debug spans
-// are always sampled, a throttler can be enabled per client to rate-limit the number
-// of debug spans a client can start.
-type Throttler interface {
- // IsAllowed determines whether the operation should be allowed and not be
- // throttled.
- IsAllowed(operation string) bool
-}
-
-// DefaultThrottler doesn't throttle at all.
-type DefaultThrottler struct{}
-
-// IsAllowed implements Throttler#IsAllowed.
-func (t DefaultThrottler) IsAllowed(operation string) bool {
- return true
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go
deleted file mode 100644
index 8402d087c2..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/interop.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "github.com/opentracing/opentracing-go"
-)
-
-// TODO this file should not be needed after TChannel PR.
-
-type formatKey int
-
-// SpanContextFormat is a constant used as OpenTracing Format.
-// Requires *SpanContext as carrier.
-// This format is intended for interop with TChannel or other Zipkin-like tracers.
-const SpanContextFormat formatKey = iota
-
-type jaegerTraceContextPropagator struct {
- tracer *Tracer
-}
-
-func (p *jaegerTraceContextPropagator) Inject(
- ctx SpanContext,
- abstractCarrier interface{},
-) error {
- carrier, ok := abstractCarrier.(*SpanContext)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- carrier.CopyFrom(&ctx)
- return nil
-}
-
-func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
- carrier, ok := abstractCarrier.(*SpanContext)
- if !ok {
- return emptyContext, opentracing.ErrInvalidCarrier
- }
- ctx := new(SpanContext)
- ctx.CopyFrom(carrier)
- return *ctx, nil
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
deleted file mode 100644
index 868b2a5b54..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
-
- "github.com/opentracing/opentracing-go/log"
-
- j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
-)
-
-type tags []*j.Tag
-
-// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
-func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
- fields := tags(make([]*j.Tag, 0, len(logFields)))
- for _, field := range logFields {
- field.Marshal(&fields)
- }
- return fields
-}
-
-func (t *tags) EmitString(key, value string) {
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
-}
-
-func (t *tags) EmitBool(key string, value bool) {
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
-}
-
-func (t *tags) EmitInt(key string, value int) {
- vLong := int64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
-}
-
-func (t *tags) EmitInt32(key string, value int32) {
- vLong := int64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
-}
-
-func (t *tags) EmitInt64(key string, value int64) {
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
-}
-
-func (t *tags) EmitUint32(key string, value uint32) {
- vLong := int64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
-}
-
-func (t *tags) EmitUint64(key string, value uint64) {
- vLong := int64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
-}
-
-func (t *tags) EmitFloat32(key string, value float32) {
- vDouble := float64(value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
-}
-
-func (t *tags) EmitFloat64(key string, value float64) {
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
-}
-
-func (t *tags) EmitObject(key string, value interface{}) {
- vStr := fmt.Sprintf("%+v", value)
- *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
-}
-
-func (t *tags) EmitLazyLogger(value log.LazyLogger) {
- value(t)
-}
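Callers never invoke the Emit* methods directly; each log.Field dispatches to them through field.Marshal, and ConvertLogsToJaegerTags is the public entry point. A small usage sketch:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	fields := []log.Field{
		log.String("event", "cache-miss"),
		log.Int("attempt", 3),
		log.Bool("retryable", true),
	}
	// Each field is marshalled into a thrift Tag of the matching VType.
	tags := jaeger.ConvertLogsToJaegerTags(fields)
	for _, t := range tags {
		fmt.Println(t.Key, t.VType) // event STRING, attempt LONG, retryable BOOL
	}
}
```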
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
deleted file mode 100644
index 3ac2f8f949..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "time"
-
- "github.com/opentracing/opentracing-go"
-
- j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-// BuildJaegerThrift builds a Jaeger Thrift span from the internal Span representation.
-// TODO: (breaking change) move to internal package.
-func BuildJaegerThrift(span *Span) *j.Span {
- span.Lock()
- defer span.Unlock()
- startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
- duration := span.duration.Nanoseconds() / int64(time.Microsecond)
- jaegerSpan := &j.Span{
- TraceIdLow: int64(span.context.traceID.Low),
- TraceIdHigh: int64(span.context.traceID.High),
- SpanId: int64(span.context.spanID),
- ParentSpanId: int64(span.context.parentID),
- OperationName: span.operationName,
- Flags: int32(span.context.samplingState.flags()),
- StartTime: startTime,
- Duration: duration,
- Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
- Logs: buildLogs(span.logs),
- References: buildReferences(span.references),
- }
- return jaegerSpan
-}
-
-// BuildJaegerProcessThrift creates a thrift Process type.
-// TODO: (breaking change) move to internal package.
-func BuildJaegerProcessThrift(span *Span) *j.Process {
- span.Lock()
- defer span.Unlock()
- return buildJaegerProcessThrift(span.tracer)
-}
-
-func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
- process := &j.Process{
- ServiceName: tracer.serviceName,
- Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength),
- }
- if tracer.process.UUID != "" {
- process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
- }
- return process
-}
-
-func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
- jTags := make([]*j.Tag, 0, len(tags))
- for _, tag := range tags {
- jTag := buildTag(&tag, maxTagValueLength)
- jTags = append(jTags, jTag)
- }
- return jTags
-}
-
-func buildLogs(logs []opentracing.LogRecord) []*j.Log {
- jLogs := make([]*j.Log, 0, len(logs))
- for _, log := range logs {
- jLog := &j.Log{
- Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
- Fields: ConvertLogsToJaegerTags(log.Fields),
- }
- jLogs = append(jLogs, jLog)
- }
- return jLogs
-}
-
-func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
- jTag := &j.Tag{Key: tag.key}
- switch value := tag.value.(type) {
- case string:
- vStr := truncateString(value, maxTagValueLength)
- jTag.VStr = &vStr
- jTag.VType = j.TagType_STRING
- case []byte:
- if len(value) > maxTagValueLength {
- value = value[:maxTagValueLength]
- }
- jTag.VBinary = value
- jTag.VType = j.TagType_BINARY
- case int:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case int8:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint8:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case int16:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint16:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case int32:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint32:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case int64:
- vLong := value
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case uint64:
- vLong := int64(value)
- jTag.VLong = &vLong
- jTag.VType = j.TagType_LONG
- case float32:
- vDouble := float64(value)
- jTag.VDouble = &vDouble
- jTag.VType = j.TagType_DOUBLE
- case float64:
- vDouble := value
- jTag.VDouble = &vDouble
- jTag.VType = j.TagType_DOUBLE
- case bool:
- vBool := value
- jTag.VBool = &vBool
- jTag.VType = j.TagType_BOOL
- default:
- vStr := truncateString(stringify(value), maxTagValueLength)
- jTag.VStr = &vStr
- jTag.VType = j.TagType_STRING
- }
- return jTag
-}
-
-func buildReferences(references []Reference) []*j.SpanRef {
- retMe := make([]*j.SpanRef, 0, len(references))
- for _, ref := range references {
- if ref.Type == opentracing.ChildOfRef {
- retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
- } else if ref.Type == opentracing.FollowsFromRef {
- retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
- }
- }
- return retMe
-}
-
-func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
- return &j.SpanRef{
- RefType: refType,
- TraceIdLow: int64(ctx.traceID.Low),
- TraceIdHigh: int64(ctx.traceID.High),
- SpanId: int64(ctx.spanID),
- }
-}
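Two unit conversions in BuildJaegerThrift are easy to get wrong when reimplementing: the Thrift span carries its start time as microseconds since the Unix epoch and its duration as microseconds. The same arithmetic, restated on its own (the helper names here are illustrative, not part of the client):

```go
package main

import (
	"fmt"
	"time"
)

// startTimeMicros mirrors utils.TimeToMicrosecondsSinceEpochInt64.
func startTimeMicros(t time.Time) int64 {
	return t.UnixNano() / int64(time.Microsecond)
}

// durationMicros mirrors the duration conversion in BuildJaegerThrift.
func durationMicros(d time.Duration) int64 {
	return d.Nanoseconds() / int64(time.Microsecond)
}

func main() {
	fmt.Println(startTimeMicros(time.Unix(1, 500)))      // 1000000
	fmt.Println(durationMicros(1500 * time.Millisecond)) // 1500000
}
```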
diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go
deleted file mode 100644
index ced6e0ce93..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/log/logger.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package log
-
-import (
- "bytes"
- "fmt"
- "log"
- "sync"
-)
-
-// Logger provides an abstract interface for logging from Reporters.
-// Applications can provide their own implementation of this interface to adapt
-// reporters logging to whatever logging library they prefer (stdlib log,
-// logrus, go-logging, etc).
-type Logger interface {
- // Error logs a message at error priority
- Error(msg string)
-
- // Infof logs a message at info priority
- Infof(msg string, args ...interface{})
-}
-
-// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
-var StdLogger = &stdLogger{}
-
-type stdLogger struct{}
-
-func (l *stdLogger) Error(msg string) {
- log.Printf("ERROR: %s", msg)
-}
-
-// Infof logs a message at info priority
-func (l *stdLogger) Infof(msg string, args ...interface{}) {
- log.Printf(msg, args...)
-}
-
-// Debugf logs a message at debug priority
-func (l *stdLogger) Debugf(msg string, args ...interface{}) {
- log.Printf(fmt.Sprintf("DEBUG: %s", msg), args...)
-}
-
-// NullLogger is a no-op implementation of the Logger interface
-var NullLogger = &nullLogger{}
-
-type nullLogger struct{}
-
-func (l *nullLogger) Error(msg string) {}
-func (l *nullLogger) Infof(msg string, args ...interface{}) {}
-func (l *nullLogger) Debugf(msg string, args ...interface{}) {}
-
-// BytesBufferLogger implements Logger backed by a bytes.Buffer.
-type BytesBufferLogger struct {
- mux sync.Mutex
- buf bytes.Buffer
-}
-
-// Error implements Logger.
-func (l *BytesBufferLogger) Error(msg string) {
- l.mux.Lock()
- l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
- l.mux.Unlock()
-}
-
-// Infof implements Logger.
-func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
- l.mux.Lock()
- l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
- l.mux.Unlock()
-}
-
-// Debugf implements Logger.
-func (l *BytesBufferLogger) Debugf(msg string, args ...interface{}) {
- l.mux.Lock()
- l.buf.WriteString("DEBUG: " + fmt.Sprintf(msg, args...) + "\n")
- l.mux.Unlock()
-}
-
-// String returns string representation of the underlying buffer.
-func (l *BytesBufferLogger) String() string {
- l.mux.Lock()
- defer l.mux.Unlock()
- return l.buf.String()
-}
-
-// Flush empties the underlying buffer.
-func (l *BytesBufferLogger) Flush() {
- l.mux.Lock()
- defer l.mux.Unlock()
- l.buf.Reset()
-}
-
-// DebugLogger is an interface which adds a debug logging level
-type DebugLogger interface {
- Logger
-
- // Debugf logs a message at debug priority
- Debugf(msg string, args ...interface{})
-}
-
-// DebugLogAdapter is a log adapter that converts a Logger into a DebugLogger.
-// If the provided Logger doesn't satisfy the DebugLogger interface, an adapter
-// with debug logging disabled is returned.
-func DebugLogAdapter(logger Logger) DebugLogger {
- if logger == nil {
- return nil
- }
- if debugLogger, ok := logger.(DebugLogger); ok {
- return debugLogger
- }
- logger.Infof("debug logging disabled")
- return debugDisabledLogAdapter{logger: logger}
-}
-
-type debugDisabledLogAdapter struct {
- logger Logger
-}
-
-func (d debugDisabledLogAdapter) Error(msg string) {
- d.logger.Error(msg)
-}
-
-func (d debugDisabledLogAdapter) Infof(msg string, args ...interface{}) {
- d.logger.Infof(msg, args...)
-}
-
-// Debugf is a nop
-func (d debugDisabledLogAdapter) Debugf(msg string, args ...interface{}) {
-}
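To see how DebugLogAdapter behaves with a logger that lacks Debugf, here is a minimal sketch (printfLogger is a hypothetical adapter, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/log"
)

// printfLogger satisfies log.Logger but deliberately not log.DebugLogger.
type printfLogger struct{}

func (printfLogger) Error(msg string) { fmt.Println("ERROR:", msg) }

func (printfLogger) Infof(msg string, args ...interface{}) {
	fmt.Printf("INFO: "+msg+"\n", args...)
}

func main() {
	// Logs "debug logging disabled" once, then wraps the logger.
	dl := log.DebugLogAdapter(printfLogger{})
	dl.Infof("reporter queue size is %d", 100)
	dl.Debugf("dropped: printfLogger has no Debugf") // no-op
}
```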
diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go
deleted file mode 100644
index d4f0b50192..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/logger.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import "log"
-
-// NB: This will be deprecated in 3.0.0; please use jaeger-client-go/log/logger instead.
-
-// Logger provides an abstract interface for logging from Reporters.
-// Applications can provide their own implementation of this interface to adapt
-// reporters logging to whatever logging library they prefer (stdlib log,
-// logrus, go-logging, etc).
-type Logger interface {
- // Error logs a message at error priority
- Error(msg string)
-
- // Infof logs a message at info priority
- Infof(msg string, args ...interface{})
-}
-
-// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
-var StdLogger = &stdLogger{}
-
-type stdLogger struct{}
-
-func (l *stdLogger) Error(msg string) {
- log.Printf("ERROR: %s", msg)
-}
-
-// Infof logs a message at info priority
-func (l *stdLogger) Infof(msg string, args ...interface{}) {
- log.Printf(msg, args...)
-}
-
-// NullLogger is a no-op implementation of the Logger interface
-var NullLogger = &nullLogger{}
-
-type nullLogger struct{}
-
-func (l *nullLogger) Error(msg string) {}
-func (l *nullLogger) Infof(msg string, args ...interface{}) {}
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
deleted file mode 100644
index 50e4e22d6c..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/metrics.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "github.com/uber/jaeger-lib/metrics"
-)
-
-// Metrics is a container of all stats emitted by Jaeger tracer.
-type Metrics struct {
- // Number of traces started by this tracer as sampled
- TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"`
-
- // Number of traces started by this tracer as not sampled
- TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
-
- // Number of traces started by this tracer with delayed sampling
- TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=delayed" help:"Number of traces started by this tracer with delayed sampling"`
-
- // Number of externally started sampled traces this tracer joined
- TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
-
- // Number of externally started not-sampled traces this tracer joined
- TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
-
- // Number of sampled spans started by this tracer
- SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
-
- // Number of not sampled spans started by this tracer
- SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
-
- // Number of spans with delayed sampling started by this tracer
- SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
-
- // Number of sampled spans finished by this tracer
- SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
-
- // Number of not-sampled spans finished by this tracer
- SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
-
- // Number of spans with delayed sampling finished by this tracer
- SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
-
- // Number of errors decoding tracing context
- DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
-
- // Number of spans successfully reported
- ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"`
-
- // Number of spans not reported due to a Sender failure
- ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"`
-
- // Number of spans dropped due to internal queue overflow
- ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"`
-
- // Current number of spans in the reporter queue
- ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"`
-
- // Number of times the Sampler succeeded to retrieve sampling strategy
- SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"`
-
- // Number of times the Sampler failed to retrieve sampling strategy
- SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"`
-
- // Number of times the Sampler succeeded to retrieve and update sampling strategy
- SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"`
-
- // Number of times the Sampler failed to update sampling strategy
- SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"`
-
- // Number of times baggage was successfully written or updated on spans.
- BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"`
-
- // Number of times baggage failed to write or update on spans.
- BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"`
-
- // Number of times baggage was truncated as per baggage restrictions.
- BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"`
-
- // Number of times baggage restrictions were successfully updated.
- BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"`
-
- // Number of times baggage restrictions failed to update.
- BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"`
-
- // Number of times debug spans were throttled.
- ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"`
-
- // Number of times throttler successfully updated.
- ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"`
-
- // Number of times throttler failed to update.
- ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"`
-}
-
-// NewMetrics creates a new Metrics struct and initializes it.
-func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
- m := &Metrics{}
- // TODO the namespace "jaeger" should be configurable
- metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags)
- return m
-}
-
-// NewNullMetrics creates a new Metrics struct that won't report any metrics.
-func NewNullMetrics() *Metrics {
- return NewMetrics(metrics.NullFactory, nil)
-}
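A short initialization sketch. metrics.NullFactory is used above for the no-op case; a real deployment would pass a Prometheus or expvar factory from jaeger-lib instead, and every metric lands under the nested jaeger/tracer namespaces with the global tags applied:

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// Swap NullFactory for a concrete factory to export real metrics.
	m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"lib": "jaeger"})
	m.SpansStartedSampled.Inc(1)

	// For tests where metrics are irrelevant:
	_ = jaeger.NewNullMetrics()
}
```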
diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go
deleted file mode 100644
index 7bbd028897..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/observer.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import opentracing "github.com/opentracing/opentracing-go"
-
-// Observer can be registered with the Tracer to receive notifications about
-// new Spans.
-//
-// Deprecated: use jaeger.ContribObserver instead.
-type Observer interface {
- OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
-}
-
-// SpanObserver is created by the Observer and receives notifications about
-// other Span events.
-//
-// Deprecated: use jaeger.ContribSpanObserver instead.
-type SpanObserver interface {
- OnSetOperationName(operationName string)
- OnSetTag(key string, value interface{})
- OnFinish(options opentracing.FinishOptions)
-}
-
-// compositeObserver is a dispatcher to other observers
-type compositeObserver struct {
- observers []ContribObserver
-}
-
-// compositeSpanObserver is a dispatcher to other span observers
-type compositeSpanObserver struct {
- observers []ContribSpanObserver
-}
-
-// noopSpanObserver is used when there are no observers registered
-// on the Tracer or none of them returns span observers from OnStartSpan.
-var noopSpanObserver = &compositeSpanObserver{}
-
-func (o *compositeObserver) append(contribObserver ContribObserver) {
- o.observers = append(o.observers, contribObserver)
-}
-
-func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
- var spanObservers []ContribSpanObserver
- for _, obs := range o.observers {
- spanObs, ok := obs.OnStartSpan(sp, operationName, options)
- if ok {
- if spanObservers == nil {
- spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
- }
- spanObservers = append(spanObservers, spanObs)
- }
- }
- if len(spanObservers) == 0 {
- return noopSpanObserver
- }
- return &compositeSpanObserver{observers: spanObservers}
-}
-
-func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
- for _, obs := range o.observers {
- obs.OnSetOperationName(operationName)
- }
-}
-
-func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
- for _, obs := range o.observers {
- obs.OnSetTag(key, value)
- }
-}
-
-func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
- for _, obs := range o.observers {
- obs.OnFinish(options)
- }
-}
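A minimal ContribObserver implementation and registration sketch. countingObserver is a hypothetical name, and TracerOptions.ContribObserver is the registration hook the mainline tracer exposes; treat that as an assumption here, since tracer_options.go is not part of this hunk:

```go
package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// countingObserver counts finished spans (single-goroutine demo; a real
// observer would need synchronization).
type countingObserver struct{ finished int }

type countingSpanObserver struct{ parent *countingObserver }

func (o *countingObserver) OnStartSpan(
	sp opentracing.Span, operationName string, options opentracing.StartSpanOptions,
) (jaeger.ContribSpanObserver, bool) {
	return &countingSpanObserver{parent: o}, true
}

func (s *countingSpanObserver) OnSetOperationName(operationName string) {}
func (s *countingSpanObserver) OnSetTag(key string, value interface{})  {}
func (s *countingSpanObserver) OnFinish(options opentracing.FinishOptions) {
	s.parent.finished++
}

func main() {
	obs := &countingObserver{}
	tracer, closer := jaeger.NewTracer(
		"observer-demo", jaeger.NewConstSampler(true), jaeger.NewNullReporter(),
		jaeger.TracerOptions.ContribObserver(obs))
	defer closer.Close()

	tracer.StartSpan("op").Finish()
	fmt.Println(obs.finished) // 1
}
```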
diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go
deleted file mode 100644
index 30cbf99624..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/process.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-// Process holds process specific metadata that's relevant to this client.
-type Process struct {
- Service string
- UUID string
- Tags []Tag
-}
-
-// ProcessSetter sets a process. This can be used by any type that requires
-// the process to be set as part of initialization.
-// See internal/throttler/remote/throttler.go for an example.
-type ProcessSetter interface {
- SetProcess(process Process)
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
deleted file mode 100644
index e06459b98f..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/propagation.go
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "log"
- "net/url"
- "strings"
- "sync"
-
- opentracing "github.com/opentracing/opentracing-go"
-)
-
-// Injector is responsible for injecting SpanContext instances in a manner suitable
-// for propagation via a format-specific "carrier" object. Typically the
-// injection will take place across an RPC boundary, but message queues and
-// other IPC mechanisms are also reasonable places to use an Injector.
-type Injector interface {
- // Inject takes `SpanContext` and injects it into `carrier`. The actual type
- // of `carrier` depends on the `format` passed to `Tracer.Inject()`.
- //
- // Implementations may return opentracing.ErrInvalidCarrier or any other
- // implementation-specific error if injection fails.
- Inject(ctx SpanContext, carrier interface{}) error
-}
-
-// Extractor is responsible for extracting SpanContext instances from a
-// format-specific "carrier" object. Typically the extraction will take place
-// on the server side of an RPC boundary, but message queues and other IPC
-// mechanisms are also reasonable places to use an Extractor.
-type Extractor interface {
- // Extract decodes a SpanContext instance from the given `carrier`,
- // or (nil, opentracing.ErrSpanContextNotFound) if no context could
- // be found in the `carrier`.
- Extract(carrier interface{}) (SpanContext, error)
-}
-
-// TextMapPropagator is a combined Injector and Extractor for TextMap format
-type TextMapPropagator struct {
- headerKeys *HeadersConfig
- metrics Metrics
- encodeValue func(string) string
- decodeValue func(string) string
-}
-
-// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format
-func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
- return &TextMapPropagator{
- headerKeys: headerKeys,
- metrics: metrics,
- encodeValue: func(val string) string {
- return val
- },
- decodeValue: func(val string) string {
- return val
- },
- }
-}
-
-// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format
-func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
- return &TextMapPropagator{
- headerKeys: headerKeys,
- metrics: metrics,
- encodeValue: func(val string) string {
- return url.QueryEscape(val)
- },
- decodeValue: func(val string) string {
- // ignore decoding errors, cannot do anything about them
- if v, err := url.QueryUnescape(val); err == nil {
- return v
- }
- return val
- },
- }
-}
-
-// BinaryPropagator is a combined Injector and Extractor for Binary format
-type BinaryPropagator struct {
- tracer *Tracer
- buffers sync.Pool
-}
-
-// NewBinaryPropagator creates a combined Injector and Extractor for Binary format
-func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator {
- return &BinaryPropagator{
- tracer: tracer,
- buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
- }
-}
-
-// Inject implements Injector of TextMapPropagator
-func (p *TextMapPropagator) Inject(
- sc SpanContext,
- abstractCarrier interface{},
-) error {
- textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- // Do not encode the string with trace context to avoid accidental double-encoding
- // if people are using opentracing < 0.10.0. Our colon-separated representation
- // of the trace context is already safe for HTTP headers.
- textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
- for k, v := range sc.baggage {
- safeKey := p.addBaggageKeyPrefix(k)
- safeVal := p.encodeValue(v)
- textMapWriter.Set(safeKey, safeVal)
- }
- return nil
-}
-
-// Extract implements Extractor of TextMapPropagator
-func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
- textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
- if !ok {
- return emptyContext, opentracing.ErrInvalidCarrier
- }
- var ctx SpanContext
- var baggage map[string]string
- err := textMapReader.ForeachKey(func(rawKey, value string) error {
- key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
- if key == p.headerKeys.TraceContextHeaderName {
- var err error
- safeVal := p.decodeValue(value)
- if ctx, err = ContextFromString(safeVal); err != nil {
- return err
- }
- } else if key == p.headerKeys.JaegerDebugHeader {
- ctx.debugID = p.decodeValue(value)
- } else if key == p.headerKeys.JaegerBaggageHeader {
- if baggage == nil {
- baggage = make(map[string]string)
- }
- for k, v := range p.parseCommaSeparatedMap(value) {
- baggage[k] = v
- }
- } else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
- if baggage == nil {
- baggage = make(map[string]string)
- }
- safeKey := p.removeBaggageKeyPrefix(key)
- safeVal := p.decodeValue(value)
- baggage[safeKey] = safeVal
- }
- return nil
- })
- if err != nil {
- p.metrics.DecodingErrors.Inc(1)
- return emptyContext, err
- }
- if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
- return emptyContext, opentracing.ErrSpanContextNotFound
- }
- ctx.baggage = baggage
- return ctx, nil
-}
-
-// Inject implements Injector of BinaryPropagator
-func (p *BinaryPropagator) Inject(
- sc SpanContext,
- abstractCarrier interface{},
-) error {
- carrier, ok := abstractCarrier.(io.Writer)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- // Handle the tracer context
- if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
- return err
- }
- if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
- return err
- }
- if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
- return err
- }
- if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
- return err
- }
-
- // Handle the baggage items
- if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
- return err
- }
- for k, v := range sc.baggage {
- if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
- return err
- }
- io.WriteString(carrier, k)
- if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
- return err
- }
- io.WriteString(carrier, v)
- }
-
- return nil
-}
-
-// W3C limits https://github.com/w3c/baggage/blob/master/baggage/HTTP_HEADER_FORMAT.md#limits
-const (
- maxBinaryBaggage = 180
- maxBinaryNameValueLen = 4096
-)
-
-// Extract implements Extractor of BinaryPropagator
-func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
- carrier, ok := abstractCarrier.(io.Reader)
- if !ok {
- return emptyContext, opentracing.ErrInvalidCarrier
- }
- var ctx SpanContext
- ctx.samplingState = &samplingState{}
-
- if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
-
- var flags byte
- if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- ctx.samplingState.setFlags(flags)
-
- // Handle the baggage items
- var numBaggage int32
- if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- if numBaggage > maxBinaryBaggage {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
- ctx.baggage = make(map[string]string, iNumBaggage)
- buf := p.buffers.Get().(*bytes.Buffer)
- defer p.buffers.Put(buf)
-
- var keyLen, valLen int32
- for i := 0; i < iNumBaggage; i++ {
- if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- buf.Reset()
- buf.Grow(int(keyLen))
- if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- key := buf.String()
-
- if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- if keyLen+valLen > maxBinaryNameValueLen {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- buf.Reset()
- buf.Grow(int(valLen))
- if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
- return emptyContext, opentracing.ErrSpanContextCorrupted
- }
- ctx.baggage[key] = buf.String()
- }
- }
-
- return ctx, nil
-}
-
-// Converts a comma separated key value pair list into a map
-// e.g. key1=value1, key2=value2, key3 = value3
-// is converted to map[string]string { "key1" : "value1",
-// "key2" : "value2",
-// "key3" : "value3" }
-func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
- baggage := make(map[string]string)
- value, err := url.QueryUnescape(value)
- if err != nil {
- log.Printf("Unable to unescape %s, %v", value, err)
- return baggage
- }
- for _, kvpair := range strings.Split(value, ",") {
- kv := strings.Split(strings.TrimSpace(kvpair), "=")
- if len(kv) == 2 {
- baggage[strings.TrimSpace(kv[0])] = kv[1]
- } else {
- log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
- }
- }
- return baggage
-}
-
-// Converts a baggage item key into an http header format,
-// by prepending TraceBaggageHeaderPrefix and encoding the key string
-func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string {
- // TODO encodeBaggageKeyAsHeader add caching and escaping
- return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
-}
-
-func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string {
- // TODO decodeBaggageHeaderKey add caching and escaping
- return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
-}
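End to end, the HTTP-header propagator is normally exercised through the tracer rather than directly. A sketch of a client/server round trip; the header names assume the default HeadersConfig (`uber-trace-id` for the trace context, the `uberctx-` prefix for baggage):

```go
package main

import (
	"fmt"
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tracer, closer := jaeger.NewTracer(
		"propagation-demo", jaeger.NewConstSampler(true), jaeger.NewNullReporter())
	defer closer.Close()

	span := tracer.StartSpan("client-op")
	span.SetBaggageItem("user", "alice")

	// Client side: Inject writes uber-trace-id plus one uberctx-* header per baggage item.
	hdr := http.Header{}
	if err := tracer.Inject(
		span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(hdr),
	); err != nil {
		panic(err)
	}
	span.Finish()

	// Server side: Extract reverses the process, URL-decoding values as needed.
	sc, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(hdr))
	if err != nil {
		panic(err)
	}
	sc.ForeachBaggageItem(func(k, v string) bool {
		fmt.Println(k, "=", v) // user = alice
		return true
	})
}
```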
diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go
deleted file mode 100644
index 5646e78bb2..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/reference.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import "github.com/opentracing/opentracing-go"
-
-// Reference represents a causal reference to other Spans (via their SpanContext).
-type Reference struct {
- Type opentracing.SpanReferenceType
- Context SpanContext
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
deleted file mode 100644
index a71a92c3e8..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/reporter.go
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/opentracing/opentracing-go"
-
- "github.com/uber/jaeger-client-go/internal/reporterstats"
- "github.com/uber/jaeger-client-go/log"
-)
-
-// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
-type Reporter interface {
- // Report submits a new span to collectors, possibly asynchronously and/or with buffering.
-// If the reporter is processing Spans asynchronously then it needs to Retain() the span,
- // and then Release() it when no longer needed, to avoid span data corruption.
- Report(span *Span)
-
- // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
- Close()
-}
-
-// ------------------------------
-
-type nullReporter struct{}
-
-// NewNullReporter creates a no-op reporter that ignores all reported spans.
-func NewNullReporter() Reporter {
- return &nullReporter{}
-}
-
-// Report implements Report() method of Reporter by doing nothing.
-func (r *nullReporter) Report(span *Span) {
- // no-op
-}
-
-// Close implements Close() method of Reporter by doing nothing.
-func (r *nullReporter) Close() {
- // no-op
-}
-
-// ------------------------------
-
-type loggingReporter struct {
- logger Logger
-}
-
-// NewLoggingReporter creates a reporter that logs all reported spans to the provided logger.
-func NewLoggingReporter(logger Logger) Reporter {
- return &loggingReporter{logger}
-}
-
-// Report implements Report() method of Reporter by logging the span to the logger.
-func (r *loggingReporter) Report(span *Span) {
- r.logger.Infof("Reporting span %+v", span)
-}
-
-// Close implements Close() method of Reporter by doing nothing.
-func (r *loggingReporter) Close() {
- // no-op
-}
-
-// ------------------------------
-
-// InMemoryReporter is used for testing, and simply collects spans in memory.
-type InMemoryReporter struct {
- spans []opentracing.Span
- lock sync.Mutex
-}
-
-// NewInMemoryReporter creates a reporter that stores spans in memory.
-// NOTE: the Tracer should be created with options.PoolSpans = false.
-func NewInMemoryReporter() *InMemoryReporter {
- return &InMemoryReporter{
- spans: make([]opentracing.Span, 0, 10),
- }
-}
-
-// Report implements Report() method of Reporter by storing the span in the buffer.
-func (r *InMemoryReporter) Report(span *Span) {
- r.lock.Lock()
- // Need to retain the span otherwise it will be released
- r.spans = append(r.spans, span.Retain())
- r.lock.Unlock()
-}
-
-// Close implements Close() method of Reporter
-func (r *InMemoryReporter) Close() {
- r.Reset()
-}
-
-// SpansSubmitted returns the number of spans accumulated in the buffer.
-func (r *InMemoryReporter) SpansSubmitted() int {
- r.lock.Lock()
- defer r.lock.Unlock()
- return len(r.spans)
-}
-
-// GetSpans returns accumulated spans as a copy of the buffer.
-func (r *InMemoryReporter) GetSpans() []opentracing.Span {
- r.lock.Lock()
- defer r.lock.Unlock()
- copied := make([]opentracing.Span, len(r.spans))
- copy(copied, r.spans)
- return copied
-}
-
-// Reset clears all accumulated spans.
-func (r *InMemoryReporter) Reset() {
- r.lock.Lock()
- defer r.lock.Unlock()
-
- // Before resetting the collection, we need to release the Span memory
- for _, span := range r.spans {
- span.(*Span).Release()
- }
- r.spans = r.spans[:0]
-}
-
-// ------------------------------
-
-type compositeReporter struct {
- reporters []Reporter
-}
-
-// NewCompositeReporter creates a reporter that delegates to multiple underlying reporters.
-func NewCompositeReporter(reporters ...Reporter) Reporter {
- return &compositeReporter{reporters: reporters}
-}
-
-// Report implements Report() method of Reporter by delegating to each underlying reporter.
-func (r *compositeReporter) Report(span *Span) {
- for _, reporter := range r.reporters {
- reporter.Report(span)
- }
-}
-
-// Close implements Close() method of Reporter by closing each underlying reporter.
-func (r *compositeReporter) Close() {
- for _, reporter := range r.reporters {
- reporter.Close()
- }
-}
-
-// ------------- REMOTE REPORTER -----------------
-
-type reporterQueueItemType int
-
-const (
- defaultQueueSize = 100
- defaultBufferFlushInterval = 1 * time.Second
-
- reporterQueueItemSpan reporterQueueItemType = iota
- reporterQueueItemClose
-)
-
-type reporterQueueItem struct {
- itemType reporterQueueItemType
- span *Span
- close *sync.WaitGroup
-}
-
-// reporterStats implements reporterstats.ReporterStats.
-type reporterStats struct {
- droppedCount int64 // provided to Transports to report data loss to the backend
-}
-
-// SpansDroppedFromQueue implements reporterstats.ReporterStats.
-func (r *reporterStats) SpansDroppedFromQueue() int64 {
- return atomic.LoadInt64(&r.droppedCount)
-}
-
-func (r *reporterStats) incDroppedCount() {
- atomic.AddInt64(&r.droppedCount, 1)
-}
-
-type remoteReporter struct {
- // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
- // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
- queueLength int64 // used to update metrics.Gauge
- closed int64 // 0 - not closed, 1 - closed
-
- reporterOptions
-
- sender Transport
- queue chan reporterQueueItem
- reporterStats *reporterStats
-}
-
-// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
-// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
-// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
-// Calls to Close() block until all spans reported prior to the call to Close are flushed.
-func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
- options := reporterOptions{}
- for _, option := range opts {
- option(&options)
- }
- if options.bufferFlushInterval <= 0 {
- options.bufferFlushInterval = defaultBufferFlushInterval
- }
- if options.logger == nil {
- options.logger = log.NullLogger
- }
- if options.metrics == nil {
- options.metrics = NewNullMetrics()
- }
- if options.queueSize <= 0 {
- options.queueSize = defaultQueueSize
- }
- reporter := &remoteReporter{
- reporterOptions: options,
- sender: sender,
- queue: make(chan reporterQueueItem, options.queueSize),
- reporterStats: new(reporterStats),
- }
- if receiver, ok := sender.(reporterstats.Receiver); ok {
- receiver.SetReporterStats(reporter.reporterStats)
- }
- go reporter.processQueue()
- return reporter
-}
-
-// Report implements Report() method of Reporter.
-// It passes the span to a background go-routine for submission to Jaeger backend.
-// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
-// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
-// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
-// because some of them may still be successfully added to the queue.
-func (r *remoteReporter) Report(span *Span) {
- select {
- // Need to retain the span otherwise it will be released
- case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}:
- atomic.AddInt64(&r.queueLength, 1)
- default:
- r.metrics.ReporterDropped.Inc(1)
- r.reporterStats.incDroppedCount()
- }
-}
-
-// Close implements Close() method of Reporter by waiting for the queue to be drained.
-func (r *remoteReporter) Close() {
- r.logger.Debugf("closing reporter")
- if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
- r.logger.Error("Repeated attempt to close the reporter is ignored")
- return
- }
- r.sendCloseEvent()
- _ = r.sender.Close()
-}
-
-func (r *remoteReporter) sendCloseEvent() {
- wg := &sync.WaitGroup{}
- wg.Add(1)
- item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
-
- r.queue <- item // if the queue is full we will block until there is space
- atomic.AddInt64(&r.queueLength, 1)
- wg.Wait()
-}
-
-// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer.
-// When the buffer length reaches batchSize, it is flushed by submitting the accumulated spans to Jaeger.
-// The buffer also gets flushed automatically every bufferFlushInterval, just in case the tracer stopped
-// reporting new spans.
-func (r *remoteReporter) processQueue() {
- // flush causes the Sender to flush its accumulated spans and clear the buffer
- flush := func() {
- if flushed, err := r.sender.Flush(); err != nil {
- r.metrics.ReporterFailure.Inc(int64(flushed))
- r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error()))
- } else if flushed > 0 {
- r.metrics.ReporterSuccess.Inc(int64(flushed))
- }
- }
-
- timer := time.NewTicker(r.bufferFlushInterval)
- for {
- select {
- case <-timer.C:
- flush()
- case item := <-r.queue:
- atomic.AddInt64(&r.queueLength, -1)
- switch item.itemType {
- case reporterQueueItemSpan:
- span := item.span
- if flushed, err := r.sender.Append(span); err != nil {
- r.metrics.ReporterFailure.Inc(int64(flushed))
- r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error()))
- } else if flushed > 0 {
- r.metrics.ReporterSuccess.Inc(int64(flushed))
- // to reduce the number of gauge stats, we only emit queue length on flush
- r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
- r.logger.Debugf("flushed %d spans", flushed)
- }
- span.Release()
- case reporterQueueItemClose:
- timer.Stop()
- flush()
- item.close.Done()
- return
- }
- }
- }
-}
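For tests, the InMemoryReporter above is the natural choice; a minimal sketch:

```go
package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	reporter := jaeger.NewInMemoryReporter()
	tracer, closer := jaeger.NewTracer(
		"reporter-demo", jaeger.NewConstSampler(true), reporter)

	tracer.StartSpan("op-a").Finish()
	tracer.StartSpan("op-b").Finish()

	fmt.Println(reporter.SpansSubmitted()) // 2

	// Closing the tracer closes the reporter, which resets the buffer.
	closer.Close()
}
```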
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
deleted file mode 100644
index 2fc030547e..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/reporter_options.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "time"
-
- "github.com/uber/jaeger-client-go/log"
-)
-
-// ReporterOption is a function that sets some option on the reporter.
-type ReporterOption func(c *reporterOptions)
-
-// ReporterOptions is a factory for all available ReporterOption functions
-var ReporterOptions reporterOptions
-
-// reporterOptions control behavior of the reporter.
-type reporterOptions struct {
- // queueSize is the size of internal queue where reported spans are stored before they are processed in the background
- queueSize int
- // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
- bufferFlushInterval time.Duration
- // logger is used to log errors of span submissions
- logger log.DebugLogger
- // metrics is used to record runtime stats
- metrics *Metrics
-}
-
-// QueueSize creates a ReporterOption that sets the size of the internal queue where
-// spans are stored before they are processed.
-func (reporterOptions) QueueSize(queueSize int) ReporterOption {
- return func(r *reporterOptions) {
- r.queueSize = queueSize
- }
-}
-
-// Metrics creates a ReporterOption that initializes Metrics in the reporter,
-// which is used to record runtime statistics.
-func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
- return func(r *reporterOptions) {
- r.metrics = metrics
- }
-}
-
-// BufferFlushInterval creates a ReporterOption that sets how often the queue
-// is force-flushed.
-func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
- return func(r *reporterOptions) {
- r.bufferFlushInterval = bufferFlushInterval
- }
-}
-
-// Logger creates a ReporterOption that initializes the logger used to log
-// errors of span submissions.
-func (reporterOptions) Logger(logger Logger) ReporterOption {
- return func(r *reporterOptions) {
- r.logger = log.DebugLogAdapter(logger)
- }
-}
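Putting the options together with NewRemoteReporter from reporter.go; NewUDPTransport is the usual Sender in this client (the agent address is illustrative):

```go
package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/log"
)

func main() {
	// 0 means "use the default max packet size".
	sender, err := jaeger.NewUDPTransport("localhost:6831", 0)
	if err != nil {
		panic(err)
	}

	reporter := jaeger.NewRemoteReporter(
		sender,
		jaeger.ReporterOptions.QueueSize(500),
		jaeger.ReporterOptions.BufferFlushInterval(2*time.Second),
		jaeger.ReporterOptions.Logger(log.StdLogger),
	)
	defer reporter.Close()
}
```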
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
deleted file mode 100644
index 879948e9c9..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-An Observer that can be used to emit RPC metrics
-================================================
-
-It can be attached to the tracer during tracer construction.
-See `ExampleObserver` function in [observer_test.go](./observer_test.go).
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
deleted file mode 100644
index 51aa11b350..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
-package rpcmetrics
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
deleted file mode 100644
index 30555243d0..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcmetrics
-
-import "sync"
-
-// normalizedEndpoints is a cache for endpointName -> safeName mappings.
-type normalizedEndpoints struct {
- names map[string]string
- maxSize int
- defaultName string
- normalizer NameNormalizer
- mux sync.RWMutex
-}
-
-func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
- return &normalizedEndpoints{
- maxSize: maxSize,
- normalizer: normalizer,
- names: make(map[string]string, maxSize),
- }
-}
-
-// normalize looks up the name in the cache; if not found, it uses the normalizer
-// to convert the name to a safe name. If called with more than maxSize unique
-// names, it returns "" for all other names beyond those already cached.
-func (n *normalizedEndpoints) normalize(name string) string {
- n.mux.RLock()
- norm, ok := n.names[name]
- l := len(n.names)
- n.mux.RUnlock()
- if ok {
- return norm
- }
- if l >= n.maxSize {
- return ""
- }
- return n.normalizeWithLock(name)
-}
-
-func (n *normalizedEndpoints) normalizeWithLock(name string) string {
- norm := n.normalizer.Normalize(name)
- n.mux.Lock()
- defer n.mux.Unlock()
- // cache may have grown while we were not holding the lock
- if len(n.names) >= n.maxSize {
- return ""
- }
- n.names[name] = norm
- return norm
-}
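The pattern here — an optimistic read under RLock, a re-check under the write lock, and a hard size cap so unbounded label cardinality cannot leak memory — generalizes beyond endpoint names. A standalone restatement (boundedCache and its names are illustrative, not part of the package):

```go
package main

import "sync"

// boundedCache reproduces the normalizedEndpoints idiom.
type boundedCache struct {
	mu      sync.RWMutex
	max     int
	entries map[string]string
}

func (c *boundedCache) get(key string, compute func(string) string) string {
	c.mu.RLock()
	v, ok := c.entries[key]
	n := len(c.entries)
	c.mu.RUnlock()
	if ok {
		return v
	}
	if n >= c.max {
		return "" // caller maps "" to a catch-all bucket
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.entries) >= c.max { // may have filled up while unlocked
		return ""
	}
	v = compute(key)
	c.entries[key] = v
	return v
}

func main() {
	c := &boundedCache{max: 2, entries: map[string]string{}}
	_ = c.get("GET /users/:id", func(s string) string { return s })
}
```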
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
deleted file mode 100644
index a8cec2fa68..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcmetrics
-
-import (
- "sync"
-
- "github.com/uber/jaeger-lib/metrics"
-)
-
-const (
- otherEndpointsPlaceholder = "other"
- endpointNameMetricTag = "endpoint"
-)
-
-// Metrics is a collection of metrics for an endpoint describing
-// throughput, success, errors, and performance.
-type Metrics struct {
- // RequestCountSuccess is a counter of the total number of successes.
- RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
-
- // RequestCountFailures is a counter of the number of times any failure has been observed.
- RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
-
- // RequestLatencySuccess is a latency histogram of successful requests.
- RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
-
- // RequestLatencyFailures is a latency histogram of failed requests.
- RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
-
- // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
- HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
-
- // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
- HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
-
- // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
- HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
-
- // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
- HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
-}
-
-func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
- if statusCode >= 200 && statusCode < 300 {
- m.HTTPStatusCode2xx.Inc(1)
- } else if statusCode >= 300 && statusCode < 400 {
- m.HTTPStatusCode3xx.Inc(1)
- } else if statusCode >= 400 && statusCode < 500 {
- m.HTTPStatusCode4xx.Inc(1)
- } else if statusCode >= 500 && statusCode < 600 {
- m.HTTPStatusCode5xx.Inc(1)
- }
-}
-
-// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
-// Only maxNumberOfEndpoints Metrics are stored; all other endpoint names are mapped
-// to the generic endpoint name "other".
-type MetricsByEndpoint struct {
- metricsFactory metrics.Factory
- endpoints *normalizedEndpoints
- metricsByEndpoint map[string]*Metrics
- mux sync.RWMutex
-}
-
-func newMetricsByEndpoint(
- metricsFactory metrics.Factory,
- normalizer NameNormalizer,
- maxNumberOfEndpoints int,
-) *MetricsByEndpoint {
- return &MetricsByEndpoint{
- metricsFactory: metricsFactory,
- endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
- metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
- }
-}
-
-func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
- safeName := m.endpoints.normalize(endpoint)
- if safeName == "" {
- safeName = otherEndpointsPlaceholder
- }
- m.mux.RLock()
- met := m.metricsByEndpoint[safeName]
- m.mux.RUnlock()
- if met != nil {
- return met
- }
-
- return m.getWithWriteLock(safeName)
-}
-
-// split out into its own method to make it easier to test
-func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
- m.mux.Lock()
- defer m.mux.Unlock()
-
- // it is possible that the name has already been registered after we released
- // the read lock and before we grabbed the write lock, so check for that.
- if met, ok := m.metricsByEndpoint[safeName]; ok {
- return met
- }
-
- // it would be nice to create the struct before locking, since Init() is somewhat
- // expensive; however, some metrics backends (e.g. expvar) may not like duplicate metrics.
- met := &Metrics{}
- tags := map[string]string{endpointNameMetricTag: safeName}
- metrics.Init(met, m.metricsFactory, tags)
-
- m.metricsByEndpoint[safeName] = met
- return met
-}
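For orientation, metrics.Init above reads the struct tags via reflection and registers one metric per field, combining each field's static tags with the common endpoint tag, so RequestCountSuccess for endpoint "get-user" becomes the counter requests{endpoint="get-user", error="false"}. A small runnable sketch of that tag merging (mergeTags is illustrative; the real work happens inside jaeger-lib's metrics.Init):

package main

import "fmt"

// mergeTags mimics how per-field tags from the struct definition combine
// with the common endpoint tag during registration (illustrative).
func mergeTags(fieldTags, commonTags map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range commonTags {
		out[k] = v
	}
	for k, v := range fieldTags {
		out[k] = v
	}
	return out
}

func main() {
	common := map[string]string{"endpoint": "get-user"}
	fmt.Println("requests", mergeTags(map[string]string{"error": "false"}, common))
	fmt.Println("http_requests", mergeTags(map[string]string{"status_code": "2xx"}, common))
}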
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
deleted file mode 100644
index 148d84b3a1..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcmetrics
-
-// NameNormalizer is used to convert the endpoint names to strings
-// that can be safely used as tags in the metrics.
-type NameNormalizer interface {
- Normalize(name string) string
-}
-
-// DefaultNameNormalizer converts endpoint names so that they contain only characters
-// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
-var DefaultNameNormalizer = &SimpleNameNormalizer{
- SafeSets: []SafeCharacterSet{
- &Range{From: 'a', To: 'z'},
- &Range{From: 'A', To: 'Z'},
- &Range{From: '0', To: '9'},
- &Char{'-'},
- &Char{'_'},
- &Char{'/'},
- &Char{'.'},
- },
- Replacement: '-',
-}
-
-// SimpleNameNormalizer uses a set of safe character sets.
-type SimpleNameNormalizer struct {
- SafeSets []SafeCharacterSet
- Replacement byte
-}
-
-// SafeCharacterSet determines whether a given character is "safe".
-type SafeCharacterSet interface {
- IsSafe(c byte) bool
-}
-
-// Range implements SafeCharacterSet
-type Range struct {
- From, To byte
-}
-
-// IsSafe implements SafeCharacterSet
-func (r *Range) IsSafe(c byte) bool {
- return c >= r.From && c <= r.To
-}
-
-// Char implements SafeCharacterSet
-type Char struct {
- Val byte
-}
-
-// IsSafe implements SafeCharacterSet
-func (ch *Char) IsSafe(c byte) bool {
- return c == ch.Val
-}
-
-// Normalize checks each character in the string against SafeSets,
-// and if it is not safe, substitutes it with Replacement.
-func (n *SimpleNameNormalizer) Normalize(name string) string {
- var retMe []byte
- nameBytes := []byte(name)
- for i, b := range nameBytes {
- if n.safeByte(b) {
- if retMe != nil {
- retMe[i] = b
- }
- } else {
- if retMe == nil {
- retMe = make([]byte, len(nameBytes))
- copy(retMe[0:i], nameBytes[0:i])
- }
- retMe[i] = n.Replacement
- }
- }
- if retMe == nil {
- return name
- }
- return string(retMe)
-}
-
-// safeByte checks b against all of the safe charsets.
-func (n *SimpleNameNormalizer) safeByte(b byte) bool {
- for i := range n.SafeSets {
- if n.SafeSets[i].IsSafe(b) {
- return true
- }
- }
- return false
-}
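A short usage sketch of the normalizer above; the expected outputs assume DefaultNameNormalizer's safe charset [a-zA-Z0-9-./_]:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/rpcmetrics"
)

func main() {
	n := rpcmetrics.DefaultNameNormalizer
	// Unsafe characters (space, '{', '}') are replaced with '-'.
	fmt.Println(n.Normalize("GET /user/{id}")) // GET-/user/-id-
	// Fully safe names are returned as-is; the copy-on-write logic in
	// Normalize avoids allocating in this case.
	fmt.Println(n.Normalize("get-user.v1")) // get-user.v1
}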
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
deleted file mode 100644
index eca5ff6f3b..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpcmetrics
-
-import (
- "strconv"
- "sync"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
- "github.com/uber/jaeger-lib/metrics"
-
- jaeger "github.com/uber/jaeger-client-go"
-)
-
-const defaultMaxNumberOfEndpoints = 200
-
-// Observer is an observer that can emit RPC metrics.
-type Observer struct {
- metricsByEndpoint *MetricsByEndpoint
-}
-
-// NewObserver creates a new observer that can emit RPC metrics.
-func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
- return &Observer{
- metricsByEndpoint: newMetricsByEndpoint(
- metricsFactory,
- normalizer,
- defaultMaxNumberOfEndpoints,
- ),
- }
-}
-
-// OnStartSpan creates a new Observer for the span.
-func (o *Observer) OnStartSpan(
- operationName string,
- options opentracing.StartSpanOptions,
-) jaeger.SpanObserver {
- return NewSpanObserver(o.metricsByEndpoint, operationName, options)
-}
-
-// SpanKind identifies the span as inbound, outbound, or internal.
-type SpanKind int
-
-const (
- // Local span kind
- Local SpanKind = iota
- // Inbound span kind
- Inbound
- // Outbound span kind
- Outbound
-)
-
-// SpanObserver collects RPC metrics
-type SpanObserver struct {
- metricsByEndpoint *MetricsByEndpoint
- operationName string
- startTime time.Time
- mux sync.Mutex
- kind SpanKind
- httpStatusCode uint16
- err bool
-}
-
-// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
-func NewSpanObserver(
- metricsByEndpoint *MetricsByEndpoint,
- operationName string,
- options opentracing.StartSpanOptions,
-) *SpanObserver {
- so := &SpanObserver{
- metricsByEndpoint: metricsByEndpoint,
- operationName: operationName,
- startTime: options.StartTime,
- }
- for k, v := range options.Tags {
- so.handleTagInLock(k, v)
- }
- return so
-}
-
-// handleTagInLock watches for special tags:
-// - SpanKind
-// - HTTPStatusCode
-// - Error
-func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
- if key == string(ext.SpanKind) {
- if v, ok := value.(ext.SpanKindEnum); ok {
- value = string(v)
- }
- if v, ok := value.(string); ok {
- if v == string(ext.SpanKindRPCClientEnum) {
- so.kind = Outbound
- } else if v == string(ext.SpanKindRPCServerEnum) {
- so.kind = Inbound
- }
- }
- return
- }
- if key == string(ext.HTTPStatusCode) {
- if v, ok := value.(uint16); ok {
- so.httpStatusCode = v
- } else if v, ok := value.(int); ok {
- so.httpStatusCode = uint16(v)
- } else if v, ok := value.(string); ok {
- if vv, err := strconv.Atoi(v); err == nil {
- so.httpStatusCode = uint16(vv)
- }
- }
- return
- }
- if key == string(ext.Error) {
- if v, ok := value.(bool); ok {
- so.err = v
- } else if v, ok := value.(string); ok {
- if vv, err := strconv.ParseBool(v); err == nil {
- so.err = vv
- }
- }
- return
- }
-}
-
-// OnFinish emits the RPC metrics. It only has an effect when the operation
-// name is not blank and the span kind is an RPC server (Inbound).
-func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
- so.mux.Lock()
- defer so.mux.Unlock()
-
- if so.operationName == "" || so.kind != Inbound {
- return
- }
-
- mets := so.metricsByEndpoint.get(so.operationName)
- latency := options.FinishTime.Sub(so.startTime)
- if so.err {
- mets.RequestCountFailures.Inc(1)
- mets.RequestLatencyFailures.Record(latency)
- } else {
- mets.RequestCountSuccess.Inc(1)
- mets.RequestLatencySuccess.Record(latency)
- }
- mets.recordHTTPStatusCode(so.httpStatusCode)
-}
-
-// OnSetOperationName records new operation name.
-func (so *SpanObserver) OnSetOperationName(operationName string) {
- so.mux.Lock()
- so.operationName = operationName
- so.mux.Unlock()
-}
-
-// OnSetTag implements SpanObserver
-func (so *SpanObserver) OnSetTag(key string, value interface{}) {
- so.mux.Lock()
- so.handleTagInLock(key, value)
- so.mux.Unlock()
-}
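To actually emit these metrics, the Observer defined above is installed on the tracer. A sketch using the jaeger-client-go config package, with the jaeger-lib Prometheus factory as one possible metrics.Factory (the service name and wiring details are illustrative):

package main

import (
	jaegercfg "github.com/uber/jaeger-client-go/config"
	"github.com/uber/jaeger-client-go/rpcmetrics"
	"github.com/uber/jaeger-lib/metrics/prometheus"
)

func main() {
	metricsFactory := prometheus.New() // any jaeger-lib metrics.Factory works
	cfg := jaegercfg.Configuration{}
	closer, err := cfg.InitGlobalTracer(
		"my-service",
		// Observer hooks the RPC metrics emitter into span start/finish events.
		jaegercfg.Observer(rpcmetrics.NewObserver(metricsFactory, rpcmetrics.DefaultNameNormalizer)),
	)
	if err != nil {
		panic(err)
	}
	defer closer.Close()
}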
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
deleted file mode 100644
index d0be8ad500..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/sampler.go
+++ /dev/null
@@ -1,516 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
- "math"
- "strings"
- "sync"
-
- "github.com/uber/jaeger-client-go/thrift-gen/sampling"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-const (
- defaultMaxOperations = 2000
-)
-
-// Sampler decides whether a new trace should be sampled or not.
-type Sampler interface {
- // IsSampled decides whether a trace with the given `id` and `operation`
- // should be sampled. This function also returns the tags that can be
- // used to identify the type of sampling that was applied to the root
- // span. Most simple samplers would return two tags, sampler.type and
- // sampler.param, similar to those used in the Configuration.
- IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
-
- // Close does a clean shutdown of the sampler, stopping any background
- // go-routines it may have started.
- Close()
-
- // Equal checks if the `other` sampler is functionally equivalent
- // to this sampler.
- // TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
- Equal(other Sampler) bool
-}
-
-// -----------------------
-
-// ConstSampler is a sampler that always makes the same decision.
-type ConstSampler struct {
- legacySamplerV1Base
- Decision bool
- tags []Tag
-}
-
-// NewConstSampler creates a ConstSampler.
-func NewConstSampler(sample bool) *ConstSampler {
- tags := []Tag{
- {key: SamplerTypeTagKey, value: SamplerTypeConst},
- {key: SamplerParamTagKey, value: sample},
- }
- s := &ConstSampler{
- Decision: sample,
- tags: tags,
- }
- s.delegate = s.IsSampled
- return s
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return s.Decision, s.tags
-}
-
-// Close implements Close() of Sampler.
-func (s *ConstSampler) Close() {
- // nothing to do
-}
-
-// Equal implements Equal() of Sampler.
-func (s *ConstSampler) Equal(other Sampler) bool {
- if o, ok := other.(*ConstSampler); ok {
- return s.Decision == o.Decision
- }
- return false
-}
-
-// String is used to log sampler details.
-func (s *ConstSampler) String() string {
- return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
-}
-
-// -----------------------
-
-// ProbabilisticSampler is a sampler that randomly samples a certain percentage
-// of traces.
-type ProbabilisticSampler struct {
- legacySamplerV1Base
- samplingRate float64
- samplingBoundary uint64
- tags []Tag
-}
-
-const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
-
-// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
-// samplingRate, in the range between 0.0 and 1.0.
-//
-// It relies on the fact that new trace IDs are 63-bit random numbers themselves, thus the sampling decision
-// can be made without generating a new random number, simply by checking if traceID < (samplingRate * 2^63).
-// TODO remove the error from this function for next major release
-func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
- if samplingRate < 0.0 || samplingRate > 1.0 {
- return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
- }
- return newProbabilisticSampler(samplingRate), nil
-}
-
-func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
- s := new(ProbabilisticSampler)
- s.delegate = s.IsSampled
- return s.init(samplingRate)
-}
-
-func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
- s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
- s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
- s.tags = []Tag{
- {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
- {key: SamplerParamTagKey, value: s.samplingRate},
- }
- return s
-}
-
-// SamplingRate returns the sampling probability this sampler was constructed with.
-func (s *ProbabilisticSampler) SamplingRate() float64 {
- return s.samplingRate
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return s.samplingBoundary >= id.Low&maxRandomNumber, s.tags
-}
-
-// Close implements Close() of Sampler.
-func (s *ProbabilisticSampler) Close() {
- // nothing to do
-}
-
-// Equal implements Equal() of Sampler.
-func (s *ProbabilisticSampler) Equal(other Sampler) bool {
- if o, ok := other.(*ProbabilisticSampler); ok {
- return s.samplingBoundary == o.samplingBoundary
- }
- return false
-}
-
-// Update modifies in-place the sampling rate. Locking must be done externally.
-func (s *ProbabilisticSampler) Update(samplingRate float64) error {
- if samplingRate < 0.0 || samplingRate > 1.0 {
- return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
- }
- s.init(samplingRate)
- return nil
-}
-
-// String is used to log sampler details.
-func (s *ProbabilisticSampler) String() string {
- return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
-}
-
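The boundary arithmetic is easy to sanity-check in isolation. This sketch reproduces the decision rule from IsSampled above for a 25% sampling rate (the specific rate and trace IDs are illustrative):

package main

import "fmt"

const maxRandomNumber = ^(uint64(1) << 63) // 0x7fffffffffffffff

func main() {
	samplingRate := 0.25
	boundary := uint64(float64(maxRandomNumber) * samplingRate)
	// A trace ID whose low 63 bits fall at or under the boundary is sampled;
	// with uniformly random IDs that happens ~25% of the time.
	for _, low := range []uint64{boundary - 1, boundary, boundary + 1} {
		fmt.Println(low, boundary >= low&maxRandomNumber) // true, true, false
	}
}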
-// -----------------------
-
-// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
-// the burstiness of the service, i.e. a service with uniformly distributed requests will have those
-// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
-// number of sequential requests can be sampled each second.
-type RateLimitingSampler struct {
- legacySamplerV1Base
- maxTracesPerSecond float64
- rateLimiter *utils.ReconfigurableRateLimiter
- tags []Tag
-}
-
-// NewRateLimitingSampler creates new RateLimitingSampler.
-func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
- s := new(RateLimitingSampler)
- s.delegate = s.IsSampled
- return s.init(maxTracesPerSecond)
-}
-
-func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
- if s.rateLimiter == nil {
- s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
- } else {
- s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
- }
- s.maxTracesPerSecond = maxTracesPerSecond
- s.tags = []Tag{
- {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
- {key: SamplerParamTagKey, value: maxTracesPerSecond},
- }
- return s
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return s.rateLimiter.CheckCredit(1.0), s.tags
-}
-
-// Update reconfigures the rate limiter, while preserving its accumulated balance.
-// Locking must be done externally.
-func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
- if s.maxTracesPerSecond != maxTracesPerSecond {
- s.init(maxTracesPerSecond)
- }
-}
-
-// Close does nothing.
-func (s *RateLimitingSampler) Close() {
- // nothing to do
-}
-
-// Equal compares with another sampler.
-func (s *RateLimitingSampler) Equal(other Sampler) bool {
- if o, ok := other.(*RateLimitingSampler); ok {
- return s.maxTracesPerSecond == o.maxTracesPerSecond
- }
- return false
-}
-
-// String is used to log sampler details.
-func (s *RateLimitingSampler) String() string {
- return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
-}
-
-// -----------------------
-
-// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
-// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower-bound sampler such that
-// every operation is sampled at least once in the time interval defined by lowerBound, i.e. a lowerBound
-// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
-//
-// The ProbabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() returns
-// true for both samplers, the tags for ProbabilisticSampler will be used.
-type GuaranteedThroughputProbabilisticSampler struct {
- probabilisticSampler *ProbabilisticSampler
- lowerBoundSampler *RateLimitingSampler
- tags []Tag
- samplingRate float64
- lowerBound float64
-}
-
-// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
-// ProbabilisticSampler and RateLimitingSampler.
-func NewGuaranteedThroughputProbabilisticSampler(
- lowerBound, samplingRate float64,
-) (*GuaranteedThroughputProbabilisticSampler, error) {
- return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
-}
-
-func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
- s := &GuaranteedThroughputProbabilisticSampler{
- lowerBoundSampler: NewRateLimitingSampler(lowerBound),
- lowerBound: lowerBound,
- }
- s.setProbabilisticSampler(samplingRate)
- return s
-}
-
-func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
- if s.probabilisticSampler == nil {
- s.probabilisticSampler = newProbabilisticSampler(samplingRate)
- } else if s.samplingRate != samplingRate {
- s.probabilisticSampler.init(samplingRate)
- }
- // since we don't validate samplingRate, the sampler may have clamped it to the [0, 1] interval
- samplingRate = s.probabilisticSampler.SamplingRate()
- if s.samplingRate != samplingRate || s.tags == nil {
- s.samplingRate = s.probabilisticSampler.SamplingRate()
- s.tags = []Tag{
- {key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
- {key: SamplerParamTagKey, value: s.samplingRate},
- }
- }
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
- s.lowerBoundSampler.IsSampled(id, operation)
- return true, tags
- }
- sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
- return sampled, s.tags
-}
-
-// Close implements Close() of Sampler.
-func (s *GuaranteedThroughputProbabilisticSampler) Close() {
- s.probabilisticSampler.Close()
- s.lowerBoundSampler.Close()
-}
-
-// Equal implements Equal() of Sampler.
-func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
- // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
- // more information.
- return false
-}
-
-// this function should only be called while holding a Write lock
-func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
- s.setProbabilisticSampler(samplingRate)
- if s.lowerBound != lowerBound {
- s.lowerBoundSampler.Update(lowerBound)
- s.lowerBound = lowerBound
- }
-}
-
-func (s GuaranteedThroughputProbabilisticSampler) String() string {
- return fmt.Sprintf("GuaranteedThroughputProbabilisticSampler(lowerBound=%f, samplingRate=%f)", s.lowerBound, s.samplingRate)
-}
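The decision flow of IsSampled above is worth spelling out: a positive probabilistic decision wins (and still consumes a rate-limiter credit, so the lower bound is not double-counted); otherwise the lower-bound limiter alone decides, and that branch always emits the lowerbound tags. A small illustrative sketch of the combination logic (combine is a stand-in, not part of this package):

package main

import "fmt"

// combine mimics GuaranteedThroughputProbabilisticSampler.IsSampled:
// the probabilistic decision wins; otherwise the lower-bound rate
// limiter alone decides and the lowerbound tags apply (illustrative).
func combine(probSampled, lowerBoundSampled bool) (sampled bool, samplerType string) {
	if probSampled {
		return true, "probabilistic"
	}
	return lowerBoundSampled, "lowerbound"
}

func main() {
	for _, c := range [][2]bool{{true, true}, {false, true}, {false, false}} {
		s, t := combine(c[0], c[1])
		fmt.Printf("prob=%v lower=%v -> sampled=%v type=%s\n", c[0], c[1], s, t)
	}
}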
-
-// -----------------------
-
-// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
-// on a per-operation basis.
-type PerOperationSampler struct {
- sync.RWMutex
-
- samplers map[string]*GuaranteedThroughputProbabilisticSampler
- defaultSampler *ProbabilisticSampler
- lowerBound float64
- maxOperations int
-
- // see description in PerOperationSamplerParams
- operationNameLateBinding bool
-}
-
-// NewAdaptiveSampler returns a new PerOperationSampler.
-// Deprecated: please use NewPerOperationSampler.
-func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
- return NewPerOperationSampler(PerOperationSamplerParams{
- MaxOperations: maxOperations,
- Strategies: strategies,
- }), nil
-}
-
-// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
-type PerOperationSamplerParams struct {
- // Max number of operations that will be tracked. Other operations will be given the default strategy.
- MaxOperations int
-
- // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
- // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
- // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
- // in applications that always provide the correct span name on trace creation.
- //
- // For backwards compatibility this option is off by default.
- OperationNameLateBinding bool
-
- // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
- Strategies *sampling.PerOperationSamplingStrategies
-}
-
-// NewPerOperationSampler returns a new PerOperationSampler.
-func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
- if params.MaxOperations <= 0 {
- params.MaxOperations = defaultMaxOperations
- }
- samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
- for _, strategy := range params.Strategies.PerOperationStrategies {
- sampler := newGuaranteedThroughputProbabilisticSampler(
- params.Strategies.DefaultLowerBoundTracesPerSecond,
- strategy.ProbabilisticSampling.SamplingRate,
- )
- samplers[strategy.Operation] = sampler
- }
- return &PerOperationSampler{
- samplers: samplers,
- defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
- lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond,
- maxOperations: params.MaxOperations,
- operationNameLateBinding: params.OperationNameLateBinding,
- }
-}
-
-// IsSampled is not used and only exists to match the Sampler V1 API.
-// TODO (breaking change) remove when upgrading everything to SamplerV2
-func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return false, nil
-}
-
-func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
- samplerV1 := s.getSamplerForOperation(operationName)
- var sampled bool
- var tags []Tag
- if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
- sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
- }
- return sampled, tags
-}
-
-// OnCreateSpan implements OnCreateSpan of SamplerV2.
-func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
- sampled, tags := s.trySampling(span, span.OperationName())
- return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
-}
-
-// OnSetOperationName implements OnSetOperationName of SamplerV2.
-func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
- sampled, tags := s.trySampling(span, operationName)
- return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
-}
-
-// OnSetTag implements OnSetTag of SamplerV2.
-func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
- return SamplingDecision{Sample: false, Retryable: true}
-}
-
-// OnFinishSpan implements OnFinishSpan of SamplerV2.
-func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
- return SamplingDecision{Sample: false, Retryable: true}
-}
-
-func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
- s.RLock()
- sampler, ok := s.samplers[operation]
- if ok {
- defer s.RUnlock()
- return sampler
- }
- s.RUnlock()
- s.Lock()
- defer s.Unlock()
-
- // Check if sampler has already been created
- sampler, ok = s.samplers[operation]
- if ok {
- return sampler
- }
- // Store no more than maxOperations unique operations.
- if len(s.samplers) >= s.maxOperations {
- return s.defaultSampler
- }
- newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
- s.samplers[operation] = newSampler
- return newSampler
-}
-
-// Close invokes Close on all underlying samplers.
-func (s *PerOperationSampler) Close() {
- s.Lock()
- defer s.Unlock()
- for _, sampler := range s.samplers {
- sampler.Close()
- }
- s.defaultSampler.Close()
-}
-
-func (s *PerOperationSampler) String() string {
- var sb strings.Builder
-
- fmt.Fprintf(&sb, "PerOperationSampler(defaultSampler=%v, ", s.defaultSampler)
- fmt.Fprintf(&sb, "lowerBound=%f, ", s.lowerBound)
- fmt.Fprintf(&sb, "maxOperations=%d, ", s.maxOperations)
- fmt.Fprintf(&sb, "operationNameLateBinding=%t, ", s.operationNameLateBinding)
- fmt.Fprintf(&sb, "numOperations=%d,\n", len(s.samplers))
- fmt.Fprintf(&sb, "samplers=[")
- for operationName, sampler := range s.samplers {
- fmt.Fprintf(&sb, "\n(operationName=%s, sampler=%v)", operationName, sampler)
- }
- fmt.Fprintf(&sb, "])")
-
- return sb.String()
-}
-
-// Equal is not used.
-// TODO (breaking change) remove this in the future
-func (s *PerOperationSampler) Equal(other Sampler) bool {
- // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
- // samplers which all need to be initialized before this function can be called for a comparison.
- // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
- // changing. Hence this function always returns false so that the update function can be called.
- // Once the Equal() function is removed from the Sampler API, this will no longer be needed.
- return false
-}
-
-func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
- s.Lock()
- defer s.Unlock()
- newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
- for _, strategy := range strategies.PerOperationStrategies {
- operation := strategy.Operation
- samplingRate := strategy.ProbabilisticSampling.SamplingRate
- lowerBound := strategies.DefaultLowerBoundTracesPerSecond
- if sampler, ok := s.samplers[operation]; ok {
- sampler.update(lowerBound, samplingRate)
- newSamplers[operation] = sampler
- } else {
- sampler := newGuaranteedThroughputProbabilisticSampler(
- lowerBound,
- samplingRate,
- )
- newSamplers[operation] = sampler
- }
- }
- s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
- if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
- s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
- }
- s.samplers = newSamplers
-}
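A sketch of constructing a PerOperationSampler directly from a strategies payload, much as a remote sampler would (the operation name, rates, and cap are illustrative):

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func main() {
	strategies := &sampling.PerOperationSamplingStrategies{
		DefaultSamplingProbability:       0.001,
		DefaultLowerBoundTracesPerSecond: 1.0 / 600, // at least once per 10 minutes
		PerOperationStrategies: []*sampling.OperationSamplingStrategy{{
			Operation:             "GET /user",
			ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: 0.5},
		}},
	}
	s := jaeger.NewPerOperationSampler(jaeger.PerOperationSamplerParams{
		MaxOperations: 100,
		Strategies:    strategies,
	})
	defer s.Close()
	fmt.Println(s) // String() lists the per-operation samplers
}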
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
deleted file mode 100644
index 119f0a1bb6..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/uber/jaeger-client-go/log"
- "github.com/uber/jaeger-client-go/thrift-gen/sampling"
-)
-
-const (
- defaultRemoteSamplingTimeout = 10 * time.Second
- defaultSamplingRefreshInterval = time.Minute
-)
-
-// SamplingStrategyFetcher is used to fetch sampling strategy updates from the remote server.
-type SamplingStrategyFetcher interface {
- Fetch(service string) ([]byte, error)
-}
-
-// SamplingStrategyParser is used to parse sampling strategy updates. The output object
-// should be of the type that is recognized by the SamplerUpdaters.
-type SamplingStrategyParser interface {
- Parse(response []byte) (interface{}, error)
-}
-
-// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
-// retrieved from the remote config server, to the current sampler. The updater can modify
-// the sampler in-place if the sampler supports it, or create a new one.
-//
-// If the strategy does not contain configuration for the sampler in question,
-// the updater must return a nil sampler to give other updaters a chance to inspect
-// the sampling strategy response.
-//
-// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
-type SamplerUpdater interface {
- Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
-}
-
-// RemotelyControlledSampler is a delegating sampler that polls a remote server
-// for the appropriate sampling strategy, constructs a corresponding sampler and
-// delegates to it for sampling decisions.
-type RemotelyControlledSampler struct {
- // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
- // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
- closed int64 // 0 - not closed, 1 - closed
-
- sync.RWMutex // used to serialize access to samplerOptions.sampler
- samplerOptions
-
- serviceName string
- doneChan chan *sync.WaitGroup
-}
-
-// NewRemotelyControlledSampler creates a sampler that periodically pulls
-// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
-func NewRemotelyControlledSampler(
- serviceName string,
- opts ...SamplerOption,
-) *RemotelyControlledSampler {
- options := new(samplerOptions).applyOptionsAndDefaults(opts...)
- sampler := &RemotelyControlledSampler{
- samplerOptions: *options,
- serviceName: serviceName,
- doneChan: make(chan *sync.WaitGroup),
- }
- go sampler.pollController()
- return sampler
-}
-
-// IsSampled implements IsSampled() of Sampler.
-// TODO (breaking change) remove when Sampler V1 is removed
-func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- return false, nil
-}
-
-// OnCreateSpan implements OnCreateSpan of SamplerV2.
-func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
- s.RLock()
- defer s.RUnlock()
- return s.sampler.OnCreateSpan(span)
-}
-
-// OnSetOperationName implements OnSetOperationName of SamplerV2.
-func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
- s.RLock()
- defer s.RUnlock()
- return s.sampler.OnSetOperationName(span, operationName)
-}
-
-// OnSetTag implements OnSetTag of SamplerV2.
-func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
- s.RLock()
- defer s.RUnlock()
- return s.sampler.OnSetTag(span, key, value)
-}
-
-// OnFinishSpan implements OnFinishSpan of SamplerV2.
-func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
- s.RLock()
- defer s.RUnlock()
- return s.sampler.OnFinishSpan(span)
-}
-
-// Close implements Close() of Sampler.
-func (s *RemotelyControlledSampler) Close() {
- if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
- s.logger.Error("Repeated attempt to close the sampler is ignored")
- return
- }
-
- var wg sync.WaitGroup
- wg.Add(1)
- s.doneChan <- &wg
- wg.Wait()
-}
-
-// Equal implements Equal() of Sampler.
-func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
- // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
- // more information.
- return false
-}
-
-func (s *RemotelyControlledSampler) pollController() {
- ticker := time.NewTicker(s.samplingRefreshInterval)
- defer ticker.Stop()
- s.pollControllerWithTicker(ticker)
-}
-
-func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
- for {
- select {
- case <-ticker.C:
- s.UpdateSampler()
- case wg := <-s.doneChan:
- wg.Done()
- return
- }
- }
-}
-
-// Sampler returns the currently active sampler.
-func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
- s.RLock()
- defer s.RUnlock()
- return s.sampler
-}
-
-func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
- s.Lock()
- defer s.Unlock()
- s.sampler = sampler
-}
-
-// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
-// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
-func (s *RemotelyControlledSampler) UpdateSampler() {
- res, err := s.samplingFetcher.Fetch(s.serviceName)
- if err != nil {
- s.metrics.SamplerQueryFailure.Inc(1)
- s.logger.Infof("failed to fetch sampling strategy: %v", err)
- return
- }
- strategy, err := s.samplingParser.Parse(res)
- if err != nil {
- s.metrics.SamplerUpdateFailure.Inc(1)
- s.logger.Infof("failed to parse sampling strategy response: %v", err)
- return
- }
-
- s.Lock()
- defer s.Unlock()
-
- s.metrics.SamplerRetrieved.Inc(1)
- if err := s.updateSamplerViaUpdaters(strategy); err != nil {
- s.metrics.SamplerUpdateFailure.Inc(1)
- s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
- return
- }
- s.metrics.SamplerUpdated.Inc(1)
-}
-
-// NB: this function should only be called while holding a Write lock
-func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
- for _, updater := range s.updaters {
- sampler, err := updater.Update(s.sampler, strategy)
- if err != nil {
- return err
- }
- if sampler != nil {
- s.logger.Debugf("sampler updated: %+v", sampler)
- s.sampler = sampler
- return nil
- }
- }
- return fmt.Errorf("unsupported sampling strategy %+v", strategy)
-}
-
-// -----------------------
-
-// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
-type ProbabilisticSamplerUpdater struct{}
-
-// Update implements Update of SamplerUpdater.
-func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
- type response interface {
- GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
- }
- var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
- if resp, ok := strategy.(response); ok {
- if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
- if ps, ok := sampler.(*ProbabilisticSampler); ok {
- if err := ps.Update(probabilistic.SamplingRate); err != nil {
- return nil, err
- }
- return sampler, nil
- }
- return newProbabilisticSampler(probabilistic.SamplingRate), nil
- }
- }
- return nil, nil
-}
-
-// -----------------------
-
-// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
-type RateLimitingSamplerUpdater struct{}
-
-// Update implements Update of SamplerUpdater.
-func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
- type response interface {
- GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
- }
- var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
- if resp, ok := strategy.(response); ok {
- if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
- rateLimit := float64(rateLimiting.MaxTracesPerSecond)
- if rl, ok := sampler.(*RateLimitingSampler); ok {
- rl.Update(rateLimit)
- return rl, nil
- }
- return NewRateLimitingSampler(rateLimit), nil
- }
- }
- return nil, nil
-}
-
-// -----------------------
-
-// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
-// Fields have the same meaning as in PerOperationSamplerParams.
-type AdaptiveSamplerUpdater struct {
- MaxOperations int
- OperationNameLateBinding bool
-}
-
-// Update implements Update of SamplerUpdater.
-func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
- type response interface {
- GetOperationSampling() *sampling.PerOperationSamplingStrategies
- }
- var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
- if p, ok := strategy.(response); ok {
- if operations := p.GetOperationSampling(); operations != nil {
- if as, ok := sampler.(*PerOperationSampler); ok {
- as.update(operations)
- return as, nil
- }
- return NewPerOperationSampler(PerOperationSamplerParams{
- MaxOperations: u.MaxOperations,
- OperationNameLateBinding: u.OperationNameLateBinding,
- Strategies: operations,
- }), nil
- }
- }
- return nil, nil
-}
-
-// -----------------------
-
-type httpSamplingStrategyFetcher struct {
- serverURL string
- logger log.DebugLogger
- httpClient http.Client
-}
-
-func newHTTPSamplingStrategyFetcher(serverURL string, logger log.DebugLogger) *httpSamplingStrategyFetcher {
- customTransport := http.DefaultTransport.(*http.Transport).Clone()
- customTransport.ResponseHeaderTimeout = defaultRemoteSamplingTimeout
-
- return &httpSamplingStrategyFetcher{
- serverURL: serverURL,
- logger: logger,
- httpClient: http.Client{
- Transport: customTransport,
- },
- }
-}
-
-func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
- v := url.Values{}
- v.Set("service", serviceName)
- uri := f.serverURL + "?" + v.Encode()
-
- resp, err := f.httpClient.Get(uri)
- if err != nil {
- return nil, err
- }
-
- defer func() {
- if err := resp.Body.Close(); err != nil {
- f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
- }
- }()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
-
- if resp.StatusCode >= 400 {
- return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
- }
-
- return body, nil
-}
-
-// -----------------------
-
-type samplingStrategyParser struct{}
-
-func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
- strategy := new(sampling.SamplingStrategyResponse)
- if err := json.Unmarshal(response, strategy); err != nil {
- return nil, err
- }
- return strategy, nil
-}
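Because both the fetcher and the parser are injectable, the update path is straightforward to exercise without a running jaeger-agent. A sketch with an in-memory fetcher (staticFetcher is hypothetical, and the JSON body assumes the agent-style response shape that samplingStrategyParser above unmarshals):

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

// staticFetcher is a hypothetical in-memory SamplingStrategyFetcher,
// handy in tests where no jaeger-agent is running.
type staticFetcher struct{ body []byte }

func (f staticFetcher) Fetch(service string) ([]byte, error) { return f.body, nil }

func main() {
	// Assumed agent-style response body: a probabilistic strategy at 10%.
	body := []byte(`{"probabilisticSampling":{"samplingRate":0.1}}`)
	s := jaeger.NewRemotelyControlledSampler(
		"my-service",
		jaeger.SamplerOptions.SamplingStrategyFetcher(staticFetcher{body: body}),
	)
	defer s.Close()
	s.UpdateSampler() // force an immediate fetch + parse + update cycle
	fmt.Println(s.Sampler())
}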
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
deleted file mode 100644
index 64b028bf32..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "time"
-
- "github.com/uber/jaeger-client-go/log"
-)
-
-// SamplerOption is a function that sets some option on the sampler
-type SamplerOption func(options *samplerOptions)
-
-// SamplerOptions is a factory for all available SamplerOption's.
-var SamplerOptions SamplerOptionsFactory
-
-// SamplerOptionsFactory is a factory for all available SamplerOption's.
-// The type acts as a namespace for factory functions. It is public to
-// make the functions discoverable via godoc. Recommended to be used
-// via global SamplerOptions variable.
-type SamplerOptionsFactory struct{}
-
-type samplerOptions struct {
- metrics *Metrics
- sampler SamplerV2
- logger log.DebugLogger
- samplingServerURL string
- samplingRefreshInterval time.Duration
- samplingFetcher SamplingStrategyFetcher
- samplingParser SamplingStrategyParser
- updaters []SamplerUpdater
- posParams PerOperationSamplerParams
-}
-
-// Metrics creates a SamplerOption that initializes Metrics on the sampler,
-// which is used to emit statistics.
-func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption {
- return func(o *samplerOptions) {
- o.metrics = m
- }
-}
-
-// MaxOperations creates a SamplerOption that sets the maximum number of
-// operations the sampler will keep track of.
-func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption {
- return func(o *samplerOptions) {
- o.posParams.MaxOperations = maxOperations
- }
-}
-
-// OperationNameLateBinding creates a SamplerOption that sets the respective
-// field in the PerOperationSamplerParams.
-func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption {
- return func(o *samplerOptions) {
- o.posParams.OperationNameLateBinding = enable
- }
-}
-
-// InitialSampler creates a SamplerOption that sets the initial sampler
-// to use before a remote sampler is created and used.
-func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption {
- return func(o *samplerOptions) {
- o.sampler = samplerV1toV2(sampler)
- }
-}
-
-// Logger creates a SamplerOption that sets the logger used by the sampler.
-func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption {
- return func(o *samplerOptions) {
- o.logger = log.DebugLogAdapter(logger)
- }
-}
-
-// SamplingServerURL creates a SamplerOption that sets the sampling server URL
-// of the local agent that contains the sampling strategies.
-func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption {
- return func(o *samplerOptions) {
- o.samplingServerURL = samplingServerURL
- }
-}
-
-// SamplingRefreshInterval creates a SamplerOption that sets how often the
-// sampler will poll the local agent for the appropriate sampling strategy.
-func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
- return func(o *samplerOptions) {
- o.samplingRefreshInterval = samplingRefreshInterval
- }
-}
-
-// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
-func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
- return func(o *samplerOptions) {
- o.samplingFetcher = fetcher
- }
-}
-
-// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
-func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
- return func(o *samplerOptions) {
- o.samplingParser = parser
- }
-}
-
-// Updaters creates a SamplerOption that initializes sampler updaters.
-func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption {
- return func(o *samplerOptions) {
- o.updaters = updaters
- }
-}
-
-func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
- for _, option := range opts {
- option(o)
- }
- if o.sampler == nil {
- o.sampler = newProbabilisticSampler(0.001)
- }
- if o.logger == nil {
- o.logger = log.NullLogger
- }
- if o.samplingServerURL == "" {
- o.samplingServerURL = DefaultSamplingServerURL
- }
- if o.metrics == nil {
- o.metrics = NewNullMetrics()
- }
- if o.samplingRefreshInterval <= 0 {
- o.samplingRefreshInterval = defaultSamplingRefreshInterval
- }
- if o.samplingFetcher == nil {
- o.samplingFetcher = newHTTPSamplingStrategyFetcher(o.samplingServerURL, o.logger)
- }
- if o.samplingParser == nil {
- o.samplingParser = new(samplingStrategyParser)
- }
- if o.updaters == nil {
- o.updaters = []SamplerUpdater{
- &AdaptiveSamplerUpdater{
- MaxOperations: o.posParams.MaxOperations,
- OperationNameLateBinding: o.posParams.OperationNameLateBinding,
- },
- new(ProbabilisticSamplerUpdater),
- new(RateLimitingSamplerUpdater),
- }
- }
- return o
-}
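A usage sketch of the options factory; every option shown is optional, and applyOptionsAndDefaults above fills in the rest (the probabilistic 0.001 initial sampler, the default server URL, a one-minute refresh interval, and the three standard updaters). The URL and values are illustrative:

package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// NewRemotelyControlledSampler starts a background polling goroutine;
	// Close stops it.
	s := jaeger.NewRemotelyControlledSampler(
		"my-service",
		jaeger.SamplerOptions.SamplingServerURL("http://localhost:5778/sampling"),
		jaeger.SamplerOptions.SamplingRefreshInterval(30*time.Second),
		jaeger.SamplerOptions.InitialSampler(jaeger.NewConstSampler(true)),
		jaeger.SamplerOptions.MaxOperations(500),
	)
	defer s.Close()
}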
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
deleted file mode 100644
index a50671a236..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-// SamplingDecision is returned by the V2 samplers.
-type SamplingDecision struct {
- Sample bool
- Retryable bool
- Tags []Tag
-}
-
-// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
-// to be made at different points of the span lifecycle.
-type SamplerV2 interface {
- OnCreateSpan(span *Span) SamplingDecision
- OnSetOperationName(span *Span, operationName string) SamplingDecision
- OnSetTag(span *Span, key string, value interface{}) SamplingDecision
- OnFinishSpan(span *Span) SamplingDecision
-
- // Close does a clean shutdown of the sampler, stopping any background
- // go-routines it may have started.
- Close()
-}
-
-// samplerV1toV2 wraps a legacy V1 sampler in an adapter that makes it look like V2.
-func samplerV1toV2(s Sampler) SamplerV2 {
- if s2, ok := s.(SamplerV2); ok {
- return s2
- }
- type legacySamplerV1toV2Adapter struct {
- legacySamplerV1Base
- }
- return &legacySamplerV1toV2Adapter{
- legacySamplerV1Base: legacySamplerV1Base{
- delegate: s.IsSampled,
- },
- }
-}
-
-// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
-// Supporting the V1 API is required because the Tracer configuration only accepts
-// a V1 Sampler, for backwards-compatibility reasons.
-// TODO (breaking change) remove this in the next major release
-type SamplerV2Base struct{}
-
-// IsSampled implements IsSampled of Sampler.
-func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
- return false, nil
-}
-
-// Close implements Close of Sampler.
-func (SamplerV2Base) Close() {}
-
-// Equal implements Equal of Sampler.
-func (SamplerV2Base) Equal(other Sampler) bool { return false }
-
-// legacySamplerV1Base is used as a base for simple samplers that only implement
-// the legacy IsSampled() function, which is not sensitive to its arguments.
-type legacySamplerV1Base struct {
- delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
-}
-
-func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
- isSampled, tags := s.delegate(span.context.traceID, span.operationName)
- return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
-}
-
-func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
- isSampled, tags := s.delegate(span.context.traceID, span.operationName)
- return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
-}
-
-func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
- return SamplingDecision{Sample: false, Retryable: true}
-}
-
-func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
- return SamplingDecision{Sample: false, Retryable: true}
-}
-
-func (s *legacySamplerV1Base) Close() {}
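A sketch of a custom V2 sampler that embeds SamplerV2Base to pick up the dummy V1 methods, which is what lets it be passed wherever a V1 Sampler is still required (the alwaysOn type is illustrative):

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

// alwaysOn embeds SamplerV2Base so it satisfies the legacy V1 Sampler
// interface while making its real decisions via the V2 callbacks.
type alwaysOn struct{ jaeger.SamplerV2Base }

func (alwaysOn) OnCreateSpan(span *jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: true, Retryable: false}
}
func (alwaysOn) OnSetOperationName(span *jaeger.Span, op string) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: true, Retryable: false}
}
func (alwaysOn) OnSetTag(span *jaeger.Span, k string, v interface{}) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}
func (alwaysOn) OnFinishSpan(span *jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}
func (alwaysOn) Close() {}

func main() {
	var s jaeger.Sampler = alwaysOn{} // V1 interface satisfied via SamplerV2Base
	fmt.Println(s.IsSampled(jaeger.TraceID{}, "op")) // false [] (dummy V1 impl)
}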
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
deleted file mode 100644
index 997cffdd88..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/span.go
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
- "github.com/opentracing/opentracing-go/log"
-)
-
-// Span implements opentracing.Span
-type Span struct {
- // referenceCounter is used to extend the lifetime of
- // the object before returning it to the pool.
- referenceCounter int32
-
- sync.RWMutex
-
- tracer *Tracer
-
- // TODO: (breaking change) change to use a pointer
- context SpanContext
-
- // The name of the "operation" this span is an instance of.
- // Known as a "span name" in some implementations.
- operationName string
-
- // firstInProcess, if true, indicates that this span is the root of the (sub)tree
- // of spans in the current process. In other words, it is true for root spans,
- // and for the ingress spans when the process joins another trace.
- firstInProcess bool
-
- // startTime is the timestamp indicating when the span began, with microsecond precision.
- startTime time.Time
-
- // duration is the duration of the span with microsecond precision.
- // A zero value means the duration is unknown.
- duration time.Duration
-
- // tags attached to this span
- tags []Tag
-
- // The span's "micro-log"
- logs []opentracing.LogRecord
-
- // The number of logs dropped because of MaxLogsPerSpan.
- numDroppedLogs int
-
- // references for this span
- references []Reference
-
- observer ContribSpanObserver
-}
-
-// Tag is a simple key value wrapper.
-// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
-type Tag struct {
- key string
- value interface{}
-}
-
-// NewTag creates a new Tag.
-// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
-func NewTag(key string, value interface{}) Tag {
- return Tag{key: key, value: value}
-}
-
-// SetOperationName sets or changes the operation name.
-func (s *Span) SetOperationName(operationName string) opentracing.Span {
- s.Lock()
- s.operationName = operationName
- ctx := s.context
- s.Unlock()
- if !ctx.isSamplingFinalized() {
- decision := s.tracer.sampler.OnSetOperationName(s, operationName)
- s.applySamplingDecision(decision, true)
- }
- s.observer.OnSetOperationName(operationName)
- return s
-}
-
-// SetTag implements SetTag() of opentracing.Span
-func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
- return s.setTagInternal(key, value, true)
-}
-
-func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
- var ctx SpanContext
- var operationName string
- if lock {
- ctx = s.SpanContext()
- operationName = s.OperationName()
- } else {
- ctx = s.context
- operationName = s.operationName
- }
-
- s.observer.OnSetTag(key, value)
- if key == string(ext.SamplingPriority) && !setSamplingPriority(ctx.samplingState, operationName, s.tracer, value) {
- return s
- }
- if !ctx.isSamplingFinalized() {
- decision := s.tracer.sampler.OnSetTag(s, key, value)
- s.applySamplingDecision(decision, lock)
- }
- if ctx.isWriteable() {
- if lock {
- s.Lock()
- defer s.Unlock()
- }
- s.appendTagNoLocking(key, value)
- }
- return s
-}
-
-// SpanContext returns span context
-func (s *Span) SpanContext() SpanContext {
- s.Lock()
- defer s.Unlock()
- return s.context
-}
-
-// StartTime returns span start time
-func (s *Span) StartTime() time.Time {
- s.Lock()
- defer s.Unlock()
- return s.startTime
-}
-
-// Duration returns span duration
-func (s *Span) Duration() time.Duration {
- s.Lock()
- defer s.Unlock()
- return s.duration
-}
-
-// Tags returns tags for span
-func (s *Span) Tags() opentracing.Tags {
- s.Lock()
- defer s.Unlock()
- var result = make(opentracing.Tags, len(s.tags))
- for _, tag := range s.tags {
- result[tag.key] = tag.value
- }
- return result
-}
-
-// Logs returns micro logs for span
-func (s *Span) Logs() []opentracing.LogRecord {
- s.Lock()
- defer s.Unlock()
-
- logs := append([]opentracing.LogRecord(nil), s.logs...)
- if s.numDroppedLogs != 0 {
- fixLogs(logs, s.numDroppedLogs)
- }
-
- return logs
-}
-
-// References returns references for this span
-func (s *Span) References() []opentracing.SpanReference {
- s.Lock()
- defer s.Unlock()
-
- if len(s.references) == 0 {
- return nil
- }
-
- result := make([]opentracing.SpanReference, len(s.references))
- for i, r := range s.references {
- result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
- }
- return result
-}
-
-func (s *Span) appendTagNoLocking(key string, value interface{}) {
- s.tags = append(s.tags, Tag{key: key, value: value})
-}
-
-// LogFields implements opentracing.Span API
-func (s *Span) LogFields(fields ...log.Field) {
- s.Lock()
- defer s.Unlock()
- if !s.context.IsSampled() {
- return
- }
- s.logFieldsNoLocking(fields...)
-}
-
-// this function should only be called while holding a Write lock
-func (s *Span) logFieldsNoLocking(fields ...log.Field) {
- lr := opentracing.LogRecord{
- Fields: fields,
- Timestamp: time.Now(),
- }
- s.appendLogNoLocking(lr)
-}
-
-// LogKV implements opentracing.Span API
-func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
- s.RLock()
- sampled := s.context.IsSampled()
- s.RUnlock()
- if !sampled {
- return
- }
- fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
- if err != nil {
- s.LogFields(log.Error(err), log.String("function", "LogKV"))
- return
- }
- s.LogFields(fields...)
-}
-
-// LogEvent implements opentracing.Span API
-func (s *Span) LogEvent(event string) {
- s.Log(opentracing.LogData{Event: event})
-}
-
-// LogEventWithPayload implements opentracing.Span API
-func (s *Span) LogEventWithPayload(event string, payload interface{}) {
- s.Log(opentracing.LogData{Event: event, Payload: payload})
-}
-
-// Log implements opentracing.Span API
-func (s *Span) Log(ld opentracing.LogData) {
- s.Lock()
- defer s.Unlock()
- if s.context.IsSampled() {
- if ld.Timestamp.IsZero() {
- ld.Timestamp = s.tracer.timeNow()
- }
- s.appendLogNoLocking(ld.ToLogRecord())
- }
-}
-
-// this function should only be called while holding a Write lock
-func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
- maxLogs := s.tracer.options.maxLogsPerSpan
- if maxLogs == 0 || len(s.logs) < maxLogs {
- s.logs = append(s.logs, lr)
- return
- }
-
- // We have too many logs. We don't touch the first numOld logs; we treat the
- // rest as a circular buffer and overwrite the oldest log among those.
- numOld := (maxLogs - 1) / 2
- numNew := maxLogs - numOld
- s.logs[numOld+s.numDroppedLogs%numNew] = lr
- s.numDroppedLogs++
-}
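To make the overwrite arithmetic concrete: with maxLogs = 5, the first numOld = 2 entries are frozen and the remaining numNew = 3 slots cycle. A small standalone sketch (illustrative only, not part of the library) prints which slot each dropped log overwrites:

package main

import "fmt"

func main() {
	maxLogs := 5
	numOld := (maxLogs - 1) / 2 // the 2 oldest logs are preserved verbatim
	numNew := maxLogs - numOld  // the remaining 3 slots act as a circular buffer
	for dropped := 0; dropped < 7; dropped++ {
		// Same index computation as appendLogNoLocking above.
		fmt.Printf("drop #%d overwrites logs[%d]\n", dropped, numOld+dropped%numNew)
	}
	// Output cycles: logs[2], logs[3], logs[4], logs[2], logs[3], logs[4], logs[2]
}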
-
-// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move to
-// the end (i.e. pos circular left shifts).
-func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
- // This algorithm is described in:
- // http://www.cplusplus.com/reference/algorithm/rotate
- for first, middle, next := 0, pos, pos; first != middle; {
- buf[first], buf[next] = buf[next], buf[first]
- first++
- next++
- if next == len(buf) {
- next = middle
- } else if first == middle {
- middle = next
- }
- }
-}
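The swap-based rotation above is the classic std::rotate algorithm. A minimal standalone sketch (using ints in place of LogRecords) shows the effect of a 2-position circular left shift:

package main

import "fmt"

// rotate moves buf[0:pos] to the end, i.e. performs pos circular left
// shifts, using the same swap-based algorithm as rotateLogBuffer.
func rotate(buf []int, pos int) {
	for first, middle, next := 0, pos, pos; first != middle; {
		buf[first], buf[next] = buf[next], buf[first]
		first++
		next++
		if next == len(buf) {
			next = middle
		} else if first == middle {
			middle = next
		}
	}
}

func main() {
	buf := []int{0, 1, 2, 3, 4}
	rotate(buf, 2)
	fmt.Println(buf) // [2 3 4 0 1]
}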
-
-func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) {
- // We dropped some log events, which means that we used part of Logs as a
-// circular buffer (see appendLogNoLocking). De-circularize it.
- numOld := (len(logs) - 1) / 2
- numNew := len(logs) - numOld
- rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew)
-
- // Replace the log in the middle (the oldest "new" log) with information
- // about the dropped logs. This means that we are effectively dropping one
- // more "new" log.
- numDropped := numDroppedLogs + 1
- logs[numOld] = opentracing.LogRecord{
- // Keep the timestamp of the last dropped event.
- Timestamp: logs[numOld].Timestamp,
- Fields: []log.Field{
- log.String("event", "dropped Span logs"),
- log.Int("dropped_log_count", numDropped),
- log.String("component", "jaeger-client"),
- },
- }
-}
-
-func (s *Span) fixLogsIfDropped() {
- if s.numDroppedLogs == 0 {
- return
- }
- fixLogs(s.logs, s.numDroppedLogs)
- s.numDroppedLogs = 0
-}
-
-// SetBaggageItem implements SetBaggageItem() of the opentracing.Span API.
-// The call is proxied via tracer.baggageSetter to allow policies to be applied
-// before baggage keys are set or replaced.
-// The setter eventually stores a new SpanContext with extended baggage:
-//
-// span.context = span.context.WithBaggageItem(key, value)
-//
-// See SpanContext.WithBaggageItem() for explanation why it's done this way.
-func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
- s.Lock()
- defer s.Unlock()
- s.tracer.setBaggage(s, key, value)
- return s
-}
-
-// BaggageItem implements BaggageItem() of the opentracing.Span API
-func (s *Span) BaggageItem(key string) string {
- s.RLock()
- defer s.RUnlock()
- return s.context.baggage[key]
-}
-
-// Finish implements opentracing.Span API
-// After Finish() is called, the Span object is returned to the allocator unless the reporter retains it,
-// so the Span must no longer be used afterwards because it may no longer be valid.
-func (s *Span) Finish() {
- s.FinishWithOptions(opentracing.FinishOptions{})
-}
-
-// FinishWithOptions implements opentracing.Span API
-func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
- if options.FinishTime.IsZero() {
- options.FinishTime = s.tracer.timeNow()
- }
- s.observer.OnFinish(options)
- s.Lock()
- s.duration = options.FinishTime.Sub(s.startTime)
- ctx := s.context
- s.Unlock()
- if !ctx.isSamplingFinalized() {
- decision := s.tracer.sampler.OnFinishSpan(s)
- s.applySamplingDecision(decision, true)
- }
- if ctx.IsSampled() {
- s.Lock()
- s.fixLogsIfDropped()
- if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
- // Note: bulk logs are not subject to maxLogsPerSpan limit
- if options.LogRecords != nil {
- s.logs = append(s.logs, options.LogRecords...)
- }
- for _, ld := range options.BulkLogData {
- s.logs = append(s.logs, ld.ToLogRecord())
- }
- }
- s.Unlock()
- }
- // call reportSpan even for non-sampled traces, to return span to the pool
- // and update metrics counter
- s.tracer.reportSpan(s)
-}
-
-// Context implements opentracing.Span API
-func (s *Span) Context() opentracing.SpanContext {
- s.Lock()
- defer s.Unlock()
- return s.context
-}
-
-// Tracer implements opentracing.Span API
-func (s *Span) Tracer() opentracing.Tracer {
- return s.tracer
-}
-
-func (s *Span) String() string {
- s.RLock()
- defer s.RUnlock()
- return s.context.String()
-}
-
-// OperationName allows retrieving current operation name.
-func (s *Span) OperationName() string {
- s.RLock()
- defer s.RUnlock()
- return s.operationName
-}
-
-// Retain increments the reference counter to extend the lifetime of the object
-func (s *Span) Retain() *Span {
- atomic.AddInt32(&s.referenceCounter, 1)
- return s
-}
-
-// Release decrements the reference counter and returns the span to the
-// allocator when the counter drops below zero
-func (s *Span) Release() {
- if atomic.AddInt32(&s.referenceCounter, -1) == -1 {
- s.tracer.spanAllocator.Put(s)
- }
-}
-
-// reset span state and release unused data
-func (s *Span) reset() {
- s.firstInProcess = false
- s.context = emptyContext
- s.operationName = ""
- s.tracer = nil
- s.startTime = time.Time{}
- s.duration = 0
- s.observer = nil
- atomic.StoreInt32(&s.referenceCounter, 0)
-
-	// Note: the slices below are truncated rather than set to nil so their backing arrays can be reused
- s.tags = s.tags[:0]
- s.logs = s.logs[:0]
- s.numDroppedLogs = 0
- s.references = s.references[:0]
-}
-
-func (s *Span) serviceName() string {
- return s.tracer.serviceName
-}
-
-func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
- var ctx SpanContext
- if lock {
- ctx = s.SpanContext()
- } else {
- ctx = s.context
- }
-
- if !decision.Retryable {
- ctx.samplingState.setFinal()
- }
- if decision.Sample {
- ctx.samplingState.setSampled()
- if len(decision.Tags) > 0 {
- if lock {
- s.Lock()
- defer s.Unlock()
- }
- for _, tag := range decision.Tags {
- s.appendTagNoLocking(tag.key, tag.value)
- }
- }
- }
-}
-
-// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
-// The behavior of setSamplingPriority is surprising:
-// if noDebugFlagOnForcedSampling is set,
-// setSamplingPriority(..., 1) always sets only flagSampled;
-// if noDebugFlagOnForcedSampling is unset and isDebugAllowed passes,
-// setSamplingPriority(..., 1) sets both flagSampled and flagDebug.
-// However,
-// setSamplingPriority(..., 0) always resets only flagSampled.
-//
-// This means that a setSamplingPriority(..., 1) followed by a setSamplingPriority(..., 0) can
-// leave flagDebug set.
-func setSamplingPriority(state *samplingState, operationName string, tracer *Tracer, value interface{}) bool {
- val, ok := value.(uint16)
- if !ok {
- return false
- }
- if val == 0 {
- state.unsetSampled()
- state.setFinal()
- return true
- }
- if tracer.options.noDebugFlagOnForcedSampling {
- state.setSampled()
- state.setFinal()
- return true
- } else if tracer.isDebugAllowed(operationName) {
- state.setDebugAndSampled()
- state.setFinal()
- return true
- }
- return false
-}
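The asymmetry called out in the comment falls directly out of the flag operations. A hypothetical package-internal fragment (assuming debug is allowed and noDebugFlagOnForcedSampling is unset):

state := &samplingState{}
state.setDebugAndSampled() // effect of setSamplingPriority(..., 1)
state.unsetSampled()       // effect of setSamplingPriority(..., 0): only flagSampled is cleared
fmt.Printf("debug=%v sampled=%v\n", state.isDebug(), state.isSampled())
// Prints: debug=true sampled=false — flagDebug survives the reset.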
-
-// EnableFirehose enables firehose flag on the span context
-func EnableFirehose(s *Span) {
- s.Lock()
- defer s.Unlock()
- s.context.samplingState.setFirehose()
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
deleted file mode 100644
index fba1e43379..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/span_allocator.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2019 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import "sync"
-
-// SpanAllocator abstraction of managing span allocations
-type SpanAllocator interface {
- Get() *Span
- Put(*Span)
-}
-
-type syncPollSpanAllocator struct {
- spanPool sync.Pool
-}
-
-func newSyncPollSpanAllocator() SpanAllocator {
- return &syncPollSpanAllocator{
- spanPool: sync.Pool{New: func() interface{} {
- return &Span{}
- }},
- }
-}
-
-func (pool *syncPollSpanAllocator) Get() *Span {
- return pool.spanPool.Get().(*Span)
-}
-
-func (pool *syncPollSpanAllocator) Put(span *Span) {
- span.reset()
- pool.spanPool.Put(span)
-}
-
-type simpleSpanAllocator struct{}
-
-func (pool simpleSpanAllocator) Get() *Span {
- return &Span{}
-}
-
-func (pool simpleSpanAllocator) Put(span *Span) {
- // @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351
- // since finished spans are not reused, no need to reset them
- // span.reset()
-}
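The two allocators trade safety for throughput: simpleSpanAllocator leaves reclamation to the garbage collector, while syncPollSpanAllocator recycles spans through a sync.Pool and therefore requires callers to honor Retain/Release. A hypothetical package-internal usage sketch:

var alloc SpanAllocator = newSyncPollSpanAllocator()

span := alloc.Get() // a recycled *Span from the pool (or a fresh one)
// ... start, populate, finish, and report the span ...
alloc.Put(span) // reset() clears its state before it re-enters the pool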
diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
deleted file mode 100644
index 5b2307be91..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/span_context.go
+++ /dev/null
@@ -1,418 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
- "sync"
-
- "go.uber.org/atomic"
-)
-
-const (
- flagSampled = 1
- flagDebug = 2
- flagFirehose = 8
-)
-
-var (
- errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
- errMalformedTracerStateString = errors.New("String does not match tracer state format")
-
- emptyContext = SpanContext{}
-)
-
-// TraceID represents a unique 128-bit identifier of a trace
-type TraceID struct {
- High, Low uint64
-}
-
-// SpanID represents a unique 64-bit identifier of a span
-type SpanID uint64
-
-// SpanContext represents propagated span identity and state
-type SpanContext struct {
- // traceID represents globally unique ID of the trace.
- // Usually generated as a random number.
- traceID TraceID
-
- // spanID represents span ID that must be unique within its trace,
- // but does not have to be globally unique.
- spanID SpanID
-
- // parentID refers to the ID of the parent span.
- // Should be 0 if the current span is a root span.
- parentID SpanID
-
-	// Distributed Context baggage. This is a snapshot in time.
- baggage map[string]string
-
- // debugID can be set to some correlation ID when the context is being
- // extracted from a TextMap carrier.
- //
- // See JaegerDebugHeader in constants.go
- debugID string
-
- // samplingState is shared across all spans
- samplingState *samplingState
-
- // remote indicates that span context represents a remote parent
- remote bool
-}
-
-type samplingState struct {
- // Span context's state flags that are propagated across processes. Only lower 8 bits are used.
- // We use an int32 instead of byte to be able to use CAS operations.
- stateFlags atomic.Int32
-
- // When state is not final, sampling will be retried on other span write operations,
- // like SetOperationName / SetTag, and the spans will remain writable.
- final atomic.Bool
-
- // localRootSpan stores the SpanID of the first span created in this process for a given trace.
- localRootSpan SpanID
-
- // extendedState allows samplers to keep intermediate state.
- // The keys and values in this map are completely opaque: interface{} -> interface{}.
- extendedState sync.Map
-}
-
-func (s *samplingState) isLocalRootSpan(id SpanID) bool {
- return id == s.localRootSpan
-}
-
-func (s *samplingState) setFlag(newFlag int32) {
- swapped := false
- for !swapped {
- old := s.stateFlags.Load()
- swapped = s.stateFlags.CAS(old, old|newFlag)
- }
-}
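The loop above is an atomic read-modify-write: it retries the compare-and-swap until no concurrent writer has changed stateFlags between the Load and the CAS. The same pattern in a minimal standalone form (using the same go.uber.org/atomic package):

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func setBit(flags *atomic.Int32, bit int32) {
	for {
		old := flags.Load()
		if flags.CAS(old, old|bit) { // succeeds only if no writer raced us
			return
		}
	}
}

func main() {
	var flags atomic.Int32
	setBit(&flags, 1) // sampled
	setBit(&flags, 8) // firehose
	fmt.Println(flags.Load()) // 9
}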
-
-func (s *samplingState) unsetFlag(newFlag int32) {
- swapped := false
- for !swapped {
- old := s.stateFlags.Load()
- swapped = s.stateFlags.CAS(old, old&^newFlag)
- }
-}
-
-func (s *samplingState) setSampled() {
- s.setFlag(flagSampled)
-}
-
-func (s *samplingState) unsetSampled() {
- s.unsetFlag(flagSampled)
-}
-
-func (s *samplingState) setDebugAndSampled() {
- s.setFlag(flagDebug | flagSampled)
-}
-
-func (s *samplingState) setFirehose() {
- s.setFlag(flagFirehose)
-}
-
-func (s *samplingState) setFlags(flags byte) {
- s.stateFlags.Store(int32(flags))
-}
-
-func (s *samplingState) setFinal() {
- s.final.Store(true)
-}
-
-func (s *samplingState) flags() byte {
- return byte(s.stateFlags.Load())
-}
-
-func (s *samplingState) isSampled() bool {
- return s.stateFlags.Load()&flagSampled == flagSampled
-}
-
-func (s *samplingState) isDebug() bool {
- return s.stateFlags.Load()&flagDebug == flagDebug
-}
-
-func (s *samplingState) isFirehose() bool {
- return s.stateFlags.Load()&flagFirehose == flagFirehose
-}
-
-func (s *samplingState) isFinal() bool {
- return s.final.Load()
-}
-
-func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
- if value, ok := s.extendedState.Load(key); ok {
- return value
- }
- value := initValue()
- value, _ = s.extendedState.LoadOrStore(key, value)
- return value
-}
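extendedStateForKey relies on sync.Map.LoadOrStore for race-free lazy initialization: two goroutines may both run initValue, but only one result is stored, and every caller observes that single instance. The pattern in isolation:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map
	v, loaded := m.LoadOrStore("state", map[string]int{"count": 0})
	fmt.Println(loaded, v) // false map[count:0] — the first caller stores its value
	v, loaded = m.LoadOrStore("state", map[string]int{"count": 99})
	fmt.Println(loaded, v) // true map[count:0] — the stored value wins
}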
-
-// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
-func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
- for k, v := range c.baggage {
- if !handler(k, v) {
- break
- }
- }
-}
-
-// IsSampled returns whether this trace was chosen for permanent storage
-// by the sampling mechanism of the tracer.
-func (c SpanContext) IsSampled() bool {
- return c.samplingState.isSampled()
-}
-
-// IsDebug indicates whether sampling was explicitly requested by the service.
-func (c SpanContext) IsDebug() bool {
- return c.samplingState.isDebug()
-}
-
-// IsSamplingFinalized indicates whether the sampling decision has been finalized.
-func (c SpanContext) IsSamplingFinalized() bool {
- return c.samplingState.isFinal()
-}
-
-// IsFirehose indicates whether the firehose flag was set
-func (c SpanContext) IsFirehose() bool {
- return c.samplingState.isFirehose()
-}
-
-// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
-// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
-func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
- return c.samplingState.extendedStateForKey(key, initValue)
-}
-
-// IsValid indicates whether this context actually represents a valid trace.
-func (c SpanContext) IsValid() bool {
- return c.traceID.IsValid() && c.spanID != 0
-}
-
-// SetFirehose enables firehose mode for this trace.
-func (c SpanContext) SetFirehose() {
- c.samplingState.setFirehose()
-}
-
-func (c SpanContext) String() string {
- var flags int32
- if c.samplingState != nil {
- flags = c.samplingState.stateFlags.Load()
- }
- if c.traceID.High == 0 {
- return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
- }
- return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
-}
-
-// ContextFromString reconstructs the Context encoded in a string
-func ContextFromString(value string) (SpanContext, error) {
- var context SpanContext
- if value == "" {
- return emptyContext, errEmptyTracerStateString
- }
- parts := strings.Split(value, ":")
- if len(parts) != 4 {
- return emptyContext, errMalformedTracerStateString
- }
- var err error
- if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
- return emptyContext, err
- }
- if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
- return emptyContext, err
- }
- if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
- return emptyContext, err
- }
- flags, err := strconv.ParseUint(parts[3], 10, 8)
- if err != nil {
- return emptyContext, err
- }
- context.samplingState = &samplingState{}
- context.samplingState.setFlags(byte(flags))
- return context, nil
-}
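The format parsed here is the colon-separated Uber trace context header, traceID:spanID:parentID:flags, with the IDs in hex. Note that flags are parsed as decimal but printed as hex by String(), so only flag values 0-9 round-trip textually. A short hypothetical round trip:

ctx, err := ContextFromString("6a7f8b0000000001:0000000000000002:0000000000000000:1")
if err != nil {
	panic(err)
}
fmt.Println(ctx.IsSampled()) // true — bit 1 is flagSampled
fmt.Println(ctx.String())    // 6a7f8b0000000001:0000000000000002:0000000000000000:1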
-
-// TraceID returns the trace ID of this span context
-func (c SpanContext) TraceID() TraceID {
- return c.traceID
-}
-
-// SpanID returns the span ID of this span context
-func (c SpanContext) SpanID() SpanID {
- return c.spanID
-}
-
-// ParentID returns the parent span ID of this span context
-func (c SpanContext) ParentID() SpanID {
- return c.parentID
-}
-
-// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
-func (c SpanContext) Flags() byte {
- return c.samplingState.flags()
-}
-
-// Span can be written to if it is sampled or the sampling decision has not been finalized.
-func (c SpanContext) isWriteable() bool {
- state := c.samplingState
- return !state.isFinal() || state.isSampled()
-}
-
-func (c SpanContext) isSamplingFinalized() bool {
- return c.samplingState.isFinal()
-}
-
-// NewSpanContext creates a new instance of SpanContext
-func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
- samplingState := &samplingState{}
- if sampled {
- samplingState.setSampled()
- }
-
- return SpanContext{
- traceID: traceID,
- spanID: spanID,
- parentID: parentID,
- samplingState: samplingState,
- baggage: baggage}
-}
-
-// CopyFrom copies data from ctx into this context, including span identity and baggage.
-// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
-func (c *SpanContext) CopyFrom(ctx *SpanContext) {
- c.traceID = ctx.traceID
- c.spanID = ctx.spanID
- c.parentID = ctx.parentID
- c.samplingState = ctx.samplingState
- if l := len(ctx.baggage); l > 0 {
- c.baggage = make(map[string]string, l)
- for k, v := range ctx.baggage {
- c.baggage[k] = v
- }
- } else {
- c.baggage = nil
- }
-}
-
-// WithBaggageItem creates a new context with an extra baggage item.
-// The baggage item is deleted if a blank value is provided.
-//
-// The SpanContext is designed to be immutable and passed by value. As such,
-// it cannot contain any locks, and should only hold immutable data, including baggage.
-// Another reason why baggage is immutable is when the span context is passed
-// as a parent when starting a new span. The new span's baggage cannot affect the parent
-// span's baggage, so the child span either needs to take a copy of the parent baggage
-// (which is expensive and unnecessary since baggage rarely changes in the life span of
-// a trace), or it needs to do a copy-on-write, which is the approach taken here.
-func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
- var newBaggage map[string]string
- // unset baggage item
- if value == "" {
- if _, ok := c.baggage[key]; !ok {
- return c
- }
- newBaggage = make(map[string]string, len(c.baggage))
- for k, v := range c.baggage {
- newBaggage[k] = v
- }
- delete(newBaggage, key)
- return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
- }
- if c.baggage == nil {
- newBaggage = map[string]string{key: value}
- } else {
- newBaggage = make(map[string]string, len(c.baggage)+1)
- for k, v := range c.baggage {
- newBaggage[k] = v
- }
- newBaggage[key] = value
- }
- // Use positional parameters so the compiler will help catch new fields.
- return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
-}
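Copy-on-write means a derived context carries the extended baggage while the original is untouched, which is what makes it safe to share a parent context across child spans. A brief sketch using only exported API:

parent := NewSpanContext(TraceID{Low: 1}, 2, 0, true, nil)
child := parent.WithBaggageItem("tenant", "acme")
child.ForeachBaggageItem(func(k, v string) bool {
	fmt.Println(k, "=", v) // tenant = acme
	return true
})
parent.ForeachBaggageItem(func(k, v string) bool {
	fmt.Println("unexpected:", k) // never runs — the parent's baggage is still nil
	return true
})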
-
-// isDebugIDContainerOnly returns true when the instance of the context is only
-// used to return the debug/correlation ID from the extract() method. This happens
-// in the situation when "jaeger-debug-id" header is passed in the carrier to
-// the extract() method, but the request otherwise has no span context in it.
-// Previously this would've returned opentracing.ErrSpanContextNotFound from the
-// extract method, but now it returns a dummy context with only debugID filled in.
-//
-// See JaegerDebugHeader in constants.go
-// See TextMapPropagator#Extract
-func (c *SpanContext) isDebugIDContainerOnly() bool {
- return !c.traceID.IsValid() && c.debugID != ""
-}
-
-// ------- TraceID -------
-
-func (t TraceID) String() string {
- if t.High == 0 {
- return fmt.Sprintf("%016x", t.Low)
- }
- return fmt.Sprintf("%016x%016x", t.High, t.Low)
-}
-
-// TraceIDFromString creates a TraceID from a hexadecimal string
-func TraceIDFromString(s string) (TraceID, error) {
- var hi, lo uint64
- var err error
- if len(s) > 32 {
- return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
- } else if len(s) > 16 {
- hiLen := len(s) - 16
- if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
- return TraceID{}, err
- }
- if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
- return TraceID{}, err
- }
- } else {
- if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
- return TraceID{}, err
- }
- }
- return TraceID{High: hi, Low: lo}, nil
-}
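Strings longer than 16 hex characters are split into the High and Low halves of the 128-bit ID. A quick illustrative parse of both forms:

short, _ := TraceIDFromString("deadbeef")                        // 64-bit form: High=0, Low=0xdeadbeef
long, _ := TraceIDFromString("0123456789abcdef0000000000000001") // 128-bit form
fmt.Println(short.String()) // 00000000deadbeef
fmt.Println(long.String())  // 0123456789abcdef0000000000000001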
-
-// IsValid checks if the trace ID is valid, i.e. not zero.
-func (t TraceID) IsValid() bool {
- return t.High != 0 || t.Low != 0
-}
-
-// ------- SpanID -------
-
-func (s SpanID) String() string {
- return fmt.Sprintf("%016x", uint64(s))
-}
-
-// SpanIDFromString creates a SpanID from a hexadecimal string
-func SpanIDFromString(s string) (SpanID, error) {
- if len(s) > 16 {
- return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
- }
- id, err := strconv.ParseUint(s, 16, 64)
- if err != nil {
- return SpanID(0), err
- }
- return SpanID(id), nil
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
deleted file mode 100644
index 54cd3b0867..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
deleted file mode 100644
index a0df507797..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-import(
- "bytes"
- "context"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
- "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
- "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
-
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-
-func init() {
-}
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
deleted file mode 100644
index 6472e84e69..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
+++ /dev/null
@@ -1,396 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-import(
- "bytes"
- "context"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
- "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
- "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
-
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-type Agent interface {
- // Parameters:
- // - Spans
- EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
- // Parameters:
- // - Batch
- EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
-}
-
-type AgentClient struct {
- c thrift.TClient
- meta thrift.ResponseMeta
-}
-
-func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
- return &AgentClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
-}
-
-func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
- return &AgentClient{
- c: thrift.NewTStandardClient(iprot, oprot),
- }
-}
-
-func NewAgentClient(c thrift.TClient) *AgentClient {
- return &AgentClient{
- c: c,
- }
-}
-
-func (p *AgentClient) Client_() thrift.TClient {
- return p.c
-}
-
-func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
- return p.meta
-}
-
-func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
- p.meta = meta
-}
-
-// Parameters:
-// - Spans
-func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
- var _args0 AgentEmitZipkinBatchArgs
- _args0.Spans = spans
- p.SetLastResponseMeta_(thrift.ResponseMeta{})
- if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
- return err
- }
- return nil
-}
-
-// Parameters:
-// - Batch
-func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
- var _args1 AgentEmitBatchArgs
- _args1.Batch = batch
- p.SetLastResponseMeta_(thrift.ResponseMeta{})
- if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
- return err
- }
- return nil
-}
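Both agent methods are Thrift oneway calls: Call is invoked with a nil result, so the client serializes the request and never waits for a reply. A hypothetical sketch of exercising the generated API against an in-memory transport (assuming the bundled thrift package still exposes NewTMemoryBuffer and NewTCompactProtocolFactory):

trans := thrift.NewTMemoryBuffer()
client := agent.NewAgentClientFactory(trans, thrift.NewTCompactProtocolFactory())
err := client.EmitBatch(context.Background(), &jaeger.Batch{
	Process: &jaeger.Process{ServiceName: "demo"},
})
// err == nil: the batch was encoded into trans; no response is read for oneway calls.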
-
-type AgentProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler Agent
-}
-
-func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewAgentProcessor(handler Agent) *AgentProcessor {
-
- self2 := &AgentProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
- self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler:handler}
- self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler:handler}
-return self2
-}
-
-func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
- if err2 != nil { return false, thrift.WrapTException(err2) }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(ctx, thrift.STRUCT)
- iprot.ReadMessageEnd(ctx)
- x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
- oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
- x3.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, x3
-
-}
-
-type agentProcessorEmitZipkinBatch struct {
- handler Agent
-}
-
-func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := AgentEmitZipkinBatchArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- _ = tickerCancel
-
- if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
- tickerCancel()
- return true, thrift.WrapTException(err2)
- }
- tickerCancel()
- return true, nil
-}
-
-type agentProcessorEmitBatch struct {
- handler Agent
-}
-
-func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := AgentEmitBatchArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- _ = tickerCancel
-
- if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
- tickerCancel()
- return true, thrift.WrapTException(err2)
- }
- tickerCancel()
- return true, nil
-}
-
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Spans
-type AgentEmitZipkinBatchArgs struct {
- Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
-}
-
-func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
- return &AgentEmitZipkinBatchArgs{}
-}
-
-
-func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
- return p.Spans
-}
-func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*zipkincore.Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i ++ {
- _elem4 := &zipkincore.Span{}
- if err := _elem4.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
- }
- p.Spans = append(p.Spans, _elem4)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
- return err
-}
-
-func (p *AgentEmitZipkinBatchArgs) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Batch
-type AgentEmitBatchArgs struct {
- Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
-}
-
-func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
- return &AgentEmitBatchArgs{}
-}
-
-var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
-func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
- if !p.IsSetBatch() {
- return AgentEmitBatchArgs_Batch_DEFAULT
- }
-return p.Batch
-}
-func (p *AgentEmitBatchArgs) IsSetBatch() bool {
- return p.Batch != nil
-}
-
-func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- p.Batch = &jaeger.Batch{}
- if err := p.Batch.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
- }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) }
- if err := p.Batch.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) }
- return err
-}
-
-func (p *AgentEmitBatchArgs) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
-}
-
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go
deleted file mode 100644
index 712b6a9da4..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package baggage
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
deleted file mode 100644
index 39b5a7ee79..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package baggage
-
-import(
- "bytes"
- "context"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-
-func init() {
-}
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go
deleted file mode 100644
index e4d89d5d51..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go
+++ /dev/null
@@ -1,565 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package baggage
-
-import(
- "bytes"
- "context"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-// Attributes:
-// - BaggageKey
-// - MaxValueLength
-type BaggageRestriction struct {
- BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"`
- MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"`
-}
-
-func NewBaggageRestriction() *BaggageRestriction {
- return &BaggageRestriction{}
-}
-
-
-func (p *BaggageRestriction) GetBaggageKey() string {
- return p.BaggageKey
-}
-
-func (p *BaggageRestriction) GetMaxValueLength() int32 {
- return p.MaxValueLength
-}
-func (p *BaggageRestriction) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetBaggageKey bool = false;
- var issetMaxValueLength bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetBaggageKey = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetMaxValueLength = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetBaggageKey{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"));
- }
- if !issetMaxValueLength{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"));
- }
- return nil
-}
-
-func (p *BaggageRestriction) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.BaggageKey = v
-}
- return nil
-}
-
-func (p *BaggageRestriction) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- p.MaxValueLength = v
-}
- return nil
-}
-
-func (p *BaggageRestriction) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "BaggageRestriction"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *BaggageRestriction) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "baggageKey", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.BaggageKey)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) }
- return err
-}
-
-func (p *BaggageRestriction) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "maxValueLength", thrift.I32, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) }
- if err := oprot.WriteI32(ctx, int32(p.MaxValueLength)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) }
- return err
-}
-
-func (p *BaggageRestriction) Equals(other *BaggageRestriction) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.BaggageKey != other.BaggageKey { return false }
- if p.MaxValueLength != other.MaxValueLength { return false }
- return true
-}
-
-func (p *BaggageRestriction) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("BaggageRestriction(%+v)", *p)
-}
-
-type BaggageRestrictionManager interface {
- // getBaggageRestrictions retrieves the baggage restrictions for a specific service.
-	// Usually, baggageRestrictions apply to all services; however, there may be situations
- // where a baggageKey might only be allowed to be set by a specific service.
- //
- // Parameters:
- // - ServiceName
- GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error)
-}
-
-type BaggageRestrictionManagerClient struct {
- c thrift.TClient
- meta thrift.ResponseMeta
-}
-
-func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
- return &BaggageRestrictionManagerClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
-}
-
-func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
- return &BaggageRestrictionManagerClient{
- c: thrift.NewTStandardClient(iprot, oprot),
- }
-}
-
-func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient {
- return &BaggageRestrictionManagerClient{
- c: c,
- }
-}
-
-func (p *BaggageRestrictionManagerClient) Client_() thrift.TClient {
- return p.c
-}
-
-func (p *BaggageRestrictionManagerClient) LastResponseMeta_() thrift.ResponseMeta {
- return p.meta
-}
-
-func (p *BaggageRestrictionManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
- p.meta = meta
-}
-
-// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
-// Usually, baggageRestrictions apply to all services; however, there may be situations
-// where a baggageKey might only be allowed to be set by a specific service.
-//
-// Parameters:
-// - ServiceName
-func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) {
- var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs
- _args0.ServiceName = serviceName
- var _result2 BaggageRestrictionManagerGetBaggageRestrictionsResult
- var _meta1 thrift.ResponseMeta
- _meta1, _err = p.Client_().Call(ctx, "getBaggageRestrictions", &_args0, &_result2)
- p.SetLastResponseMeta_(_meta1)
- if _err != nil {
- return
- }
- return _result2.GetSuccess(), nil
-}
-
-type BaggageRestrictionManagerProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler BaggageRestrictionManager
-}
-
-func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
-
- self3 := &BaggageRestrictionManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
- self3.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler:handler}
-return self3
-}
-
-func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
- if err2 != nil { return false, thrift.WrapTException(err2) }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(ctx, thrift.STRUCT)
- iprot.ReadMessageEnd(ctx)
- x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
- oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
- x4.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, x4
-
-}
-
-type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
- handler BaggageRestrictionManager
-}
-
-func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
- oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- // Start a goroutine to do server side connectivity check.
- if thrift.ServerConnectivityCheckInterval > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
- var tickerCtx context.Context
- tickerCtx, tickerCancel = context.WithCancel(context.Background())
- defer tickerCancel()
- go func(ctx context.Context, cancel context.CancelFunc) {
- ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- if !iprot.Transport().IsOpen() {
- cancel()
- return
- }
- }
- }
- }(tickerCtx, cancel)
- }
-
- result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
- var retval []*BaggageRestriction
- if retval, err2 = p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil {
- tickerCancel()
- if err2 == thrift.ErrAbandonRequest {
- return false, thrift.WrapTException(err2)
- }
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: " + err2.Error())
- oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return true, thrift.WrapTException(err2)
- } else {
- result.Success = retval
- }
- tickerCancel()
- if err2 = oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - ServiceName
-type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
- ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
-}
-
-func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
- return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
-}
-
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
- return p.ServiceName
-}
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.ServiceName = v
-}
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
- return err
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
- Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"`
-}
-
-func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
- return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
-}
-
-var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
- return p.Success
-}
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 0:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField0(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*BaggageRestriction, 0, size)
- p.Success = tSlice
- for i := 0; i < size; i ++ {
- _elem5 := &BaggageRestriction{}
- if err := _elem5.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
- }
- p.Success = append(p.Success, _elem5)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField0(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Success {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
- }
- return err
-}
-
-func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
-}
-
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
deleted file mode 100644
index fe45a9f9ad..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
deleted file mode 100644
index b6ce85570b..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-import(
- "bytes"
- "context"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-
-func init() {
-}
-
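The `var _ = ...` block in the file above exists because Go rejects unused imports and the Thrift generator builds its import list naively, so every generated file pins each import with a blank-identifier reference. The same idiom in miniature, as a rough sketch:

package demo

import (
	"bytes"
	"fmt"
)

// Pin imports that later code may or may not use, exactly as the
// generated files do; the blank identifier discards the value but
// keeps the import referenced, so the file always compiles.
var _ = fmt.Printf
var _ = bytes.Equal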
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
deleted file mode 100644
index d55cca0241..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
+++ /dev/null
@@ -1,2698 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-import(
- "bytes"
- "context"
- "database/sql/driver"
- "errors"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-type TagType int64
-const (
- TagType_STRING TagType = 0
- TagType_DOUBLE TagType = 1
- TagType_BOOL TagType = 2
- TagType_LONG TagType = 3
- TagType_BINARY TagType = 4
-)
-
-func (p TagType) String() string {
- switch p {
- case TagType_STRING: return "STRING"
- case TagType_DOUBLE: return "DOUBLE"
- case TagType_BOOL: return "BOOL"
- case TagType_LONG: return "LONG"
- case TagType_BINARY: return "BINARY"
- }
- return ""
-}
-
-func TagTypeFromString(s string) (TagType, error) {
- switch s {
- case "STRING": return TagType_STRING, nil
- case "DOUBLE": return TagType_DOUBLE, nil
- case "BOOL": return TagType_BOOL, nil
- case "LONG": return TagType_LONG, nil
- case "BINARY": return TagType_BINARY, nil
- }
- return TagType(0), fmt.Errorf("not a valid TagType string")
-}
-
-
-func TagTypePtr(v TagType) *TagType { return &v }
-
-func (p TagType) MarshalText() ([]byte, error) {
-return []byte(p.String()), nil
-}
-
-func (p *TagType) UnmarshalText(text []byte) error {
-q, err := TagTypeFromString(string(text))
-if (err != nil) {
-return err
-}
-*p = q
-return nil
-}
-
-func (p *TagType) Scan(value interface{}) error {
-v, ok := value.(int64)
-if !ok {
-return errors.New("Scan value is not int64")
-}
-*p = TagType(v)
-return nil
-}
-
-func (p * TagType) Value() (driver.Value, error) {
- if p == nil {
- return nil, nil
- }
-return int64(*p), nil
-}
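The enum above ships with text-marshalling and database/sql adapters. A small usage sketch of those generated methods (only APIs shown in this file are used):

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	// Text round-trip via the MarshalText/UnmarshalText pair.
	b, _ := jaeger.TagType_DOUBLE.MarshalText()
	fmt.Println(string(b)) // DOUBLE

	var t jaeger.TagType
	if err := t.UnmarshalText([]byte("BINARY")); err != nil {
		panic(err)
	}
	fmt.Println(t == jaeger.TagType_BINARY) // true

	// Scan/Value make the enum usable as an int64 database column.
	var s jaeger.TagType
	if err := s.Scan(int64(3)); err != nil {
		panic(err)
	}
	fmt.Println(s) // LONG, via the generated String()
}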
-type SpanRefType int64
-const (
- SpanRefType_CHILD_OF SpanRefType = 0
- SpanRefType_FOLLOWS_FROM SpanRefType = 1
-)
-
-func (p SpanRefType) String() string {
- switch p {
- case SpanRefType_CHILD_OF: return "CHILD_OF"
- case SpanRefType_FOLLOWS_FROM: return "FOLLOWS_FROM"
- }
- return ""
-}
-
-func SpanRefTypeFromString(s string) (SpanRefType, error) {
- switch s {
- case "CHILD_OF": return SpanRefType_CHILD_OF, nil
- case "FOLLOWS_FROM": return SpanRefType_FOLLOWS_FROM, nil
- }
- return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
-}
-
-
-func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
-
-func (p SpanRefType) MarshalText() ([]byte, error) {
-return []byte(p.String()), nil
-}
-
-func (p *SpanRefType) UnmarshalText(text []byte) error {
-q, err := SpanRefTypeFromString(string(text))
-if (err != nil) {
-return err
-}
-*p = q
-return nil
-}
-
-func (p *SpanRefType) Scan(value interface{}) error {
-v, ok := value.(int64)
-if !ok {
-return errors.New("Scan value is not int64")
-}
-*p = SpanRefType(v)
-return nil
-}
-
-func (p * SpanRefType) Value() (driver.Value, error) {
- if p == nil {
- return nil, nil
- }
-return int64(*p), nil
-}
-// Attributes:
-// - Key
-// - VType
-// - VStr
-// - VDouble
-// - VBool
-// - VLong
-// - VBinary
-type Tag struct {
- Key string `thrift:"key,1,required" db:"key" json:"key"`
- VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"`
- VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"`
- VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"`
- VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"`
- VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"`
- VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"`
-}
-
-func NewTag() *Tag {
- return &Tag{}
-}
-
-
-func (p *Tag) GetKey() string {
- return p.Key
-}
-
-func (p *Tag) GetVType() TagType {
- return p.VType
-}
-var Tag_VStr_DEFAULT string
-func (p *Tag) GetVStr() string {
- if !p.IsSetVStr() {
- return Tag_VStr_DEFAULT
- }
-return *p.VStr
-}
-var Tag_VDouble_DEFAULT float64
-func (p *Tag) GetVDouble() float64 {
- if !p.IsSetVDouble() {
- return Tag_VDouble_DEFAULT
- }
-return *p.VDouble
-}
-var Tag_VBool_DEFAULT bool
-func (p *Tag) GetVBool() bool {
- if !p.IsSetVBool() {
- return Tag_VBool_DEFAULT
- }
-return *p.VBool
-}
-var Tag_VLong_DEFAULT int64
-func (p *Tag) GetVLong() int64 {
- if !p.IsSetVLong() {
- return Tag_VLong_DEFAULT
- }
-return *p.VLong
-}
-var Tag_VBinary_DEFAULT []byte
-
-func (p *Tag) GetVBinary() []byte {
- return p.VBinary
-}
-func (p *Tag) IsSetVStr() bool {
- return p.VStr != nil
-}
-
-func (p *Tag) IsSetVDouble() bool {
- return p.VDouble != nil
-}
-
-func (p *Tag) IsSetVBool() bool {
- return p.VBool != nil
-}
-
-func (p *Tag) IsSetVLong() bool {
- return p.VLong != nil
-}
-
-func (p *Tag) IsSetVBinary() bool {
- return p.VBinary != nil
-}
-
-func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetKey bool = false;
- var issetVType bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetKey = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetVType = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.DOUBLE {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 5:
- if fieldTypeId == thrift.BOOL {
- if err := p.ReadField5(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 6:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField6(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 7:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField7(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetKey{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"));
- }
- if !issetVType{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"));
- }
- return nil
-}
-
-func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.Key = v
-}
- return nil
-}
-
-func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- temp := TagType(v)
- p.VType = temp
-}
- return nil
-}
-
-func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
-} else {
- p.VStr = &v
-}
- return nil
-}
-
-func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
-} else {
- p.VDouble = &v
-}
- return nil
-}
-
-func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(ctx); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
-} else {
- p.VBool = &v
-}
- return nil
-}
-
-func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 6: ", err)
-} else {
- p.VLong = &v
-}
- return nil
-}
-
-func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(ctx); err != nil {
- return thrift.PrependError("error reading field 7: ", err)
-} else {
- p.VBinary = v
-}
- return nil
-}
-
-func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- if err := p.writeField5(ctx, oprot); err != nil { return err }
- if err := p.writeField6(ctx, oprot); err != nil { return err }
- if err := p.writeField7(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
- return err
-}
-
-func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) }
- if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) }
- return err
-}
-
-func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVStr() {
- if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) }
- if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) }
- }
- return err
-}
-
-func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVDouble() {
- if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) }
- if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) }
- }
- return err
-}
-
-func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVBool() {
- if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) }
- if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) }
- }
- return err
-}
-
-func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVLong() {
- if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) }
- }
- return err
-}
-
-func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetVBinary() {
- if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) }
- if err := oprot.WriteBinary(ctx, p.VBinary); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) }
- }
- return err
-}
-
-func (p *Tag) Equals(other *Tag) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Key != other.Key { return false }
- if p.VType != other.VType { return false }
- if p.VStr != other.VStr {
- if p.VStr == nil || other.VStr == nil {
- return false
- }
- if (*p.VStr) != (*other.VStr) { return false }
- }
- if p.VDouble != other.VDouble {
- if p.VDouble == nil || other.VDouble == nil {
- return false
- }
- if (*p.VDouble) != (*other.VDouble) { return false }
- }
- if p.VBool != other.VBool {
- if p.VBool == nil || other.VBool == nil {
- return false
- }
- if (*p.VBool) != (*other.VBool) { return false }
- }
- if p.VLong != other.VLong {
- if p.VLong == nil || other.VLong == nil {
- return false
- }
- if (*p.VLong) != (*other.VLong) { return false }
- }
- if bytes.Compare(p.VBinary, other.VBinary) != 0 { return false }
- return true
-}
-
-func (p *Tag) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Tag(%+v)", *p)
-}
-
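The Tag type above behaves like a tagged union: VType selects which of the optional v* pointer fields is meaningful, and the writeFieldN methods only emit a field when its IsSet* check passes. A minimal construction sketch using only the generated API shown here:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// stringTag builds a Tag carrying a string value; only the field
// matching VType is populated, mirroring the union-style layout above.
func stringTag(key, val string) *jaeger.Tag {
	t := jaeger.NewTag()
	t.Key = key
	t.VType = jaeger.TagType_STRING
	t.VStr = &val
	return t
}

// longTag does the same for an int64 payload.
func longTag(key string, val int64) *jaeger.Tag {
	t := jaeger.NewTag()
	t.Key = key
	t.VType = jaeger.TagType_LONG
	t.VLong = &val
	return t
}

func main() {
	a := stringTag("http.method", "GET")
	b := longTag("http.status_code", 200)
	fmt.Println(a, b)        // printed via the generated String()
	fmt.Println(a.Equals(b)) // false: keys, types, and values differ
}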
-// Attributes:
-// - Timestamp
-// - Fields
-type Log struct {
- Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"`
- Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"`
-}
-
-func NewLog() *Log {
- return &Log{}
-}
-
-
-func (p *Log) GetTimestamp() int64 {
- return p.Timestamp
-}
-
-func (p *Log) GetFields() []*Tag {
- return p.Fields
-}
-func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetTimestamp bool = false;
- var issetFields bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetTimestamp = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetFields = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetTimestamp{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"));
- }
- if !issetFields{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"));
- }
- return nil
-}
-
-func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.Timestamp = v
-}
- return nil
-}
-
-func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Fields = tSlice
- for i := 0; i < size; i ++ {
- _elem0 := &Tag{}
- if err := _elem0.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.Fields = append(p.Fields, _elem0)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Log"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
- return err
-}
-
-func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Fields {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) }
- return err
-}
-
-func (p *Log) Equals(other *Log) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Timestamp != other.Timestamp { return false }
- if len(p.Fields) != len(other.Fields) { return false }
- for i, _tgt := range p.Fields {
- _src1 := other.Fields[i]
- if !_tgt.Equals(_src1) { return false }
- }
- return true
-}
-
-func (p *Log) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Log(%+v)", *p)
-}
-
-// Attributes:
-// - RefType
-// - TraceIdLow
-// - TraceIdHigh
-// - SpanId
-type SpanRef struct {
- RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"`
- TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"`
- TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"`
- SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"`
-}
-
-func NewSpanRef() *SpanRef {
- return &SpanRef{}
-}
-
-
-func (p *SpanRef) GetRefType() SpanRefType {
- return p.RefType
-}
-
-func (p *SpanRef) GetTraceIdLow() int64 {
- return p.TraceIdLow
-}
-
-func (p *SpanRef) GetTraceIdHigh() int64 {
- return p.TraceIdHigh
-}
-
-func (p *SpanRef) GetSpanId() int64 {
- return p.SpanId
-}
-func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetRefType bool = false;
- var issetTraceIdLow bool = false;
- var issetTraceIdHigh bool = false;
- var issetSpanId bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetRefType = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetTraceIdLow = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- issetTraceIdHigh = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- issetSpanId = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetRefType{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"));
- }
- if !issetTraceIdLow{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"));
- }
- if !issetTraceIdHigh{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"));
- }
- if !issetSpanId{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"));
- }
- return nil
-}
-
-func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- temp := SpanRefType(v)
- p.RefType = temp
-}
- return nil
-}
-
-func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- p.TraceIdLow = v
-}
- return nil
-}
-
-func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
-} else {
- p.TraceIdHigh = v
-}
- return nil
-}
-
-func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
-} else {
- p.SpanId = v
-}
- return nil
-}
-
-func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) }
- if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) }
- return err
-}
-
-func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) }
- return err
-}
-
-func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) }
- return err
-}
-
-func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) }
- return err
-}
-
-func (p *SpanRef) Equals(other *SpanRef) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.RefType != other.RefType { return false }
- if p.TraceIdLow != other.TraceIdLow { return false }
- if p.TraceIdHigh != other.TraceIdHigh { return false }
- if p.SpanId != other.SpanId { return false }
- return true
-}
-
-func (p *SpanRef) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("SpanRef(%+v)", *p)
-}
-
-// Attributes:
-// - TraceIdLow
-// - TraceIdHigh
-// - SpanId
-// - ParentSpanId
-// - OperationName
-// - References
-// - Flags
-// - StartTime
-// - Duration
-// - Tags
-// - Logs
-type Span struct {
- TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"`
- TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"`
- SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"`
- ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"`
- OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"`
- References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"`
- Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"`
- StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"`
- Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"`
- Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"`
- Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"`
-}
-
-func NewSpan() *Span {
- return &Span{}
-}
-
-
-func (p *Span) GetTraceIdLow() int64 {
- return p.TraceIdLow
-}
-
-func (p *Span) GetTraceIdHigh() int64 {
- return p.TraceIdHigh
-}
-
-func (p *Span) GetSpanId() int64 {
- return p.SpanId
-}
-
-func (p *Span) GetParentSpanId() int64 {
- return p.ParentSpanId
-}
-
-func (p *Span) GetOperationName() string {
- return p.OperationName
-}
-var Span_References_DEFAULT []*SpanRef
-
-func (p *Span) GetReferences() []*SpanRef {
- return p.References
-}
-
-func (p *Span) GetFlags() int32 {
- return p.Flags
-}
-
-func (p *Span) GetStartTime() int64 {
- return p.StartTime
-}
-
-func (p *Span) GetDuration() int64 {
- return p.Duration
-}
-var Span_Tags_DEFAULT []*Tag
-
-func (p *Span) GetTags() []*Tag {
- return p.Tags
-}
-var Span_Logs_DEFAULT []*Log
-
-func (p *Span) GetLogs() []*Log {
- return p.Logs
-}
-func (p *Span) IsSetReferences() bool {
- return p.References != nil
-}
-
-func (p *Span) IsSetTags() bool {
- return p.Tags != nil
-}
-
-func (p *Span) IsSetLogs() bool {
- return p.Logs != nil
-}
-
-func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetTraceIdLow bool = false;
- var issetTraceIdHigh bool = false;
- var issetSpanId bool = false;
- var issetParentSpanId bool = false;
- var issetOperationName bool = false;
- var issetFlags bool = false;
- var issetStartTime bool = false;
- var issetDuration bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetTraceIdLow = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetTraceIdHigh = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- issetSpanId = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- issetParentSpanId = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 5:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField5(ctx, iprot); err != nil {
- return err
- }
- issetOperationName = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 6:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField6(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 7:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField7(ctx, iprot); err != nil {
- return err
- }
- issetFlags = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 8:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField8(ctx, iprot); err != nil {
- return err
- }
- issetStartTime = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 9:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField9(ctx, iprot); err != nil {
- return err
- }
- issetDuration = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 10:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField10(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 11:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField11(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetTraceIdLow{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"));
- }
- if !issetTraceIdHigh{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"));
- }
- if !issetSpanId{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"));
- }
- if !issetParentSpanId{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"));
- }
- if !issetOperationName{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"));
- }
- if !issetFlags{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"));
- }
- if !issetStartTime{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"));
- }
- if !issetDuration{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"));
- }
- return nil
-}
-
-func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.TraceIdLow = v
-}
- return nil
-}
-
-func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- p.TraceIdHigh = v
-}
- return nil
-}
-
-func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
-} else {
- p.SpanId = v
-}
- return nil
-}
-
-func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
-} else {
- p.ParentSpanId = v
-}
- return nil
-}
-
-func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
-} else {
- p.OperationName = v
-}
- return nil
-}
-
-func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*SpanRef, 0, size)
- p.References = tSlice
- for i := 0; i < size; i ++ {
- _elem2 := &SpanRef{}
- if err := _elem2.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
- }
- p.References = append(p.References, _elem2)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 7: ", err)
-} else {
- p.Flags = v
-}
- return nil
-}
-
-func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 8: ", err)
-} else {
- p.StartTime = v
-}
- return nil
-}
-
-func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 9: ", err)
-} else {
- p.Duration = v
-}
- return nil
-}
-
-func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Tags = tSlice
- for i := 0; i < size; i ++ {
- _elem3 := &Tag{}
- if err := _elem3.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
- }
- p.Tags = append(p.Tags, _elem3)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Log, 0, size)
- p.Logs = tSlice
- for i := 0; i < size; i ++ {
- _elem4 := &Log{}
- if err := _elem4.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
- }
- p.Logs = append(p.Logs, _elem4)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- if err := p.writeField5(ctx, oprot); err != nil { return err }
- if err := p.writeField6(ctx, oprot); err != nil { return err }
- if err := p.writeField7(ctx, oprot); err != nil { return err }
- if err := p.writeField8(ctx, oprot); err != nil { return err }
- if err := p.writeField9(ctx, oprot); err != nil { return err }
- if err := p.writeField10(ctx, oprot); err != nil { return err }
- if err := p.writeField11(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) }
- return err
-}
-
-func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) }
- return err
-}
-
-func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) }
- return err
-}
-
-func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) }
- return err
-}
-
-func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) }
- return err
-}
-
-func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetReferences() {
- if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.References {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) }
- }
- return err
-}
-
-func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) }
- if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) }
- return err
-}
-
-func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) }
- return err
-}
-
-func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) }
- return err
-}
-
-func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetTags() {
- if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Tags {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) }
- }
- return err
-}
-
-func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetLogs() {
- if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Logs {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) }
- }
- return err
-}
-
-func (p *Span) Equals(other *Span) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.TraceIdLow != other.TraceIdLow { return false }
- if p.TraceIdHigh != other.TraceIdHigh { return false }
- if p.SpanId != other.SpanId { return false }
- if p.ParentSpanId != other.ParentSpanId { return false }
- if p.OperationName != other.OperationName { return false }
- if len(p.References) != len(other.References) { return false }
- for i, _tgt := range p.References {
- _src5 := other.References[i]
- if !_tgt.Equals(_src5) { return false }
- }
- if p.Flags != other.Flags { return false }
- if p.StartTime != other.StartTime { return false }
- if p.Duration != other.Duration { return false }
- if len(p.Tags) != len(other.Tags) { return false }
- for i, _tgt := range p.Tags {
- _src6 := other.Tags[i]
- if !_tgt.Equals(_src6) { return false }
- }
- if len(p.Logs) != len(other.Logs) { return false }
- for i, _tgt := range p.Logs {
- _src7 := other.Logs[i]
- if !_tgt.Equals(_src7) { return false }
- }
- return true
-}
-
-func (p *Span) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Span(%+v)", *p)
-}
-
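Putting the pieces together: a caller fills a Span with its required IDs, flags, and timing, plus the optional references, tags, and logs lists. A minimal construction sketch; the IDs and durations below are placeholder values, and timestamps follow Jaeger's microsecond convention:

package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	now := time.Now().UnixNano() / 1000 // Jaeger timestamps are microseconds

	val := "GET"
	span := jaeger.NewSpan()
	span.TraceIdLow = 1
	span.SpanId = 2
	span.ParentSpanId = 0 // zero means "no parent" by convention
	span.OperationName = "fetch"
	span.Flags = 1 // low bit is the sampled flag
	span.StartTime = now
	span.Duration = 1500

	// Optional lists: omitted entirely from the wire when nil,
	// per the IsSet* guards in writeField6/10/11 above.
	span.References = []*jaeger.SpanRef{{
		RefType:    jaeger.SpanRefType_CHILD_OF,
		TraceIdLow: 1,
		SpanId:     1,
	}}
	span.Tags = []*jaeger.Tag{{Key: "http.method", VType: jaeger.TagType_STRING, VStr: &val}}
	span.Logs = []*jaeger.Log{{Timestamp: now, Fields: span.Tags}}

	fmt.Println(span) // generated String() prints Span(...) with all fields
}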
-// Attributes:
-// - ServiceName
-// - Tags
-type Process struct {
- ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"`
- Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"`
-}
-
-func NewProcess() *Process {
- return &Process{}
-}
-
-
-func (p *Process) GetServiceName() string {
- return p.ServiceName
-}
-var Process_Tags_DEFAULT []*Tag
-
-func (p *Process) GetTags() []*Tag {
- return p.Tags
-}
-func (p *Process) IsSetTags() bool {
- return p.Tags != nil
-}
-
-func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetServiceName bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetServiceName = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetServiceName{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"));
- }
- return nil
-}
-
-func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.ServiceName = v
-}
- return nil
-}
-
-func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Tag, 0, size)
- p.Tags = tSlice
- for i := 0; i < size; i ++ {
- _elem8 := &Tag{}
- if err := _elem8.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
- }
- p.Tags = append(p.Tags, _elem8)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Process"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
- return err
-}
-
-func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetTags() {
- if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Tags {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) }
- }
- return err
-}
-
-func (p *Process) Equals(other *Process) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.ServiceName != other.ServiceName { return false }
- if len(p.Tags) != len(other.Tags) { return false }
- for i, _tgt := range p.Tags {
- _src9 := other.Tags[i]
- if !_tgt.Equals(_src9) { return false }
- }
- return true
-}
-
-func (p *Process) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Process(%+v)", *p)
-}
-
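Process identifies the emitting service; serviceName is its only required field, and the generated Read above rejects any payload where field 1 is absent. A short usage sketch with a hypothetical hostname tag:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	host := "web-42"
	proc := jaeger.NewProcess()
	proc.ServiceName = "checkout" // required: Read fails without field 1
	proc.Tags = []*jaeger.Tag{{   // optional: omitted from the wire when nil
		Key:   "hostname",
		VType: jaeger.TagType_STRING,
		VStr:  &host,
	}}
	fmt.Println(proc.GetServiceName(), len(proc.GetTags()))
}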
-// Attributes:
-// - FullQueueDroppedSpans
-// - TooLargeDroppedSpans
-// - FailedToEmitSpans
-type ClientStats struct {
- FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"`
- TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"`
- FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"`
-}
-
-func NewClientStats() *ClientStats {
- return &ClientStats{}
-}
-
-
-func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
- return p.FullQueueDroppedSpans
-}
-
-func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
- return p.TooLargeDroppedSpans
-}
-
-func (p *ClientStats) GetFailedToEmitSpans() int64 {
- return p.FailedToEmitSpans
-}
-func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetFullQueueDroppedSpans bool = false;
- var issetTooLargeDroppedSpans bool = false;
- var issetFailedToEmitSpans bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetFullQueueDroppedSpans = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetTooLargeDroppedSpans = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- issetFailedToEmitSpans = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetFullQueueDroppedSpans{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"));
- }
- if !issetTooLargeDroppedSpans{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"));
- }
- if !issetFailedToEmitSpans{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"));
- }
- return nil
-}
-
-func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.FullQueueDroppedSpans = v
-}
- return nil
-}
-
-func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- p.TooLargeDroppedSpans = v
-}
- return nil
-}
-
-func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
-} else {
- p.FailedToEmitSpans = v
-}
- return nil
-}
-
-func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) }
- return err
-}
-
-func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) }
- return err
-}
-
-func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) }
- return err
-}
-
-func (p *ClientStats) Equals(other *ClientStats) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { return false }
- if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { return false }
- if p.FailedToEmitSpans != other.FailedToEmitSpans { return false }
- return true
-}
-
-func (p *ClientStats) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("ClientStats(%+v)", *p)
-}
-
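For orientation, a minimal sketch of round-tripping the generated ClientStats through an in-memory transport. It assumes the Thrift 0.14 Go runtime vendored alongside this code exposes the standard `thrift.NewTMemoryBuffer` and `thrift.NewTBinaryProtocolConf` constructors, and that these types live in the `thrift-gen/jaeger` package, as the vendor paths suggest; it is illustrative only, not part of the removed file.

```go
package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	ctx := context.Background()

	// In-memory transport plus binary protocol; both ship with the
	// Thrift 0.14 runtime this generated code targets (assumed API).
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolConf(buf, nil)

	in := &jaeger.ClientStats{
		FullQueueDroppedSpans: 3,
		TooLargeDroppedSpans:  1,
		FailedToEmitSpans:     0,
	}
	if err := in.Write(ctx, proto); err != nil {
		panic(err)
	}

	out := jaeger.NewClientStats()
	if err := out.Read(ctx, proto); err != nil {
		panic(err) // all three fields are required, so Read validates the isset* flags
	}
	fmt.Println(in.Equals(out)) // true
}
```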
-// Attributes:
-// - Process
-// - Spans
-// - SeqNo
-// - Stats
-type Batch struct {
- Process *Process `thrift:"process,1,required" db:"process" json:"process"`
- Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"`
- SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"`
- Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"`
-}
-
-func NewBatch() *Batch {
- return &Batch{}
-}
-
-var Batch_Process_DEFAULT *Process
-func (p *Batch) GetProcess() *Process {
- if !p.IsSetProcess() {
- return Batch_Process_DEFAULT
- }
-return p.Process
-}
-
-func (p *Batch) GetSpans() []*Span {
- return p.Spans
-}
-var Batch_SeqNo_DEFAULT int64
-func (p *Batch) GetSeqNo() int64 {
- if !p.IsSetSeqNo() {
- return Batch_SeqNo_DEFAULT
- }
-return *p.SeqNo
-}
-var Batch_Stats_DEFAULT *ClientStats
-func (p *Batch) GetStats() *ClientStats {
- if !p.IsSetStats() {
- return Batch_Stats_DEFAULT
- }
-return p.Stats
-}
-func (p *Batch) IsSetProcess() bool {
- return p.Process != nil
-}
-
-func (p *Batch) IsSetSeqNo() bool {
- return p.SeqNo != nil
-}
-
-func (p *Batch) IsSetStats() bool {
- return p.Stats != nil
-}
-
-func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetProcess bool = false;
- var issetSpans bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetProcess = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetSpans = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetProcess{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"));
- }
- if !issetSpans{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"));
- }
- return nil
-}
-
-func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- p.Process = &Process{}
- if err := p.Process.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
- }
- return nil
-}
-
-func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i ++ {
- _elem10 := &Span{}
- if err := _elem10.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
- }
- p.Spans = append(p.Spans, _elem10)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
-} else {
- p.SeqNo = &v
-}
- return nil
-}
-
-func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- p.Stats = &ClientStats{}
- if err := p.Stats.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
- }
- return nil
-}
-
-func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) }
- if err := p.Process.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) }
- return err
-}
-
-func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) }
- return err
-}
-
-func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetSeqNo() {
- if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) }
- }
- return err
-}
-
-func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetStats() {
- if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) }
- if err := p.Stats.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) }
- }
- return err
-}
-
-func (p *Batch) Equals(other *Batch) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if !p.Process.Equals(other.Process) { return false }
- if len(p.Spans) != len(other.Spans) { return false }
- for i, _tgt := range p.Spans {
- _src11 := other.Spans[i]
- if !_tgt.Equals(_src11) { return false }
- }
- if p.SeqNo != other.SeqNo {
- if p.SeqNo == nil || other.SeqNo == nil {
- return false
- }
- if (*p.SeqNo) != (*other.SeqNo) { return false }
- }
- if !p.Stats.Equals(other.Stats) { return false }
- return true
-}
-
-func (p *Batch) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Batch(%+v)", *p)
-}
-
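The Batch struct mixes required fields (Process, Spans) with optional pointer fields (SeqNo, Stats) whose writeField methods are guarded by IsSet checks. A short sketch of constructing one, again assuming the `thrift-gen/jaeger` package name; the `int64Ptr` helper is hypothetical and not part of the generated code.

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// int64Ptr is a hypothetical helper; the generated code does not provide one.
func int64Ptr(v int64) *int64 { return &v }

func main() {
	b := &jaeger.Batch{
		Process: &jaeger.Process{ServiceName: "example-service"}, // required
		Spans:   []*jaeger.Span{},                                // required, but may be empty
		SeqNo:   int64Ptr(42),                                    // optional: writeField3 emits it only when set
		// Stats left nil, so writeField4 skips field 4 entirely.
	}
	fmt.Println(b.IsSetSeqNo(), b.IsSetStats()) // true false
}
```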
-// Attributes:
-// - Ok
-type BatchSubmitResponse struct {
- Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
-}
-
-func NewBatchSubmitResponse() *BatchSubmitResponse {
- return &BatchSubmitResponse{}
-}
-
-
-func (p *BatchSubmitResponse) GetOk() bool {
- return p.Ok
-}
-func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetOk bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.BOOL {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetOk = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetOk{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"));
- }
- return nil
-}
-
-func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.Ok = v
-}
- return nil
-}
-
-func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
- if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
- return err
-}
-
-func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Ok != other.Ok { return false }
- return true
-}
-
-func (p *BatchSubmitResponse) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
-}
-
-type Collector interface {
- // Parameters:
- // - Batches
- SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error)
-}
-
-type CollectorClient struct {
- c thrift.TClient
- meta thrift.ResponseMeta
-}
-
-func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient {
- return &CollectorClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
-}
-
-func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient {
- return &CollectorClient{
- c: thrift.NewTStandardClient(iprot, oprot),
- }
-}
-
-func NewCollectorClient(c thrift.TClient) *CollectorClient {
- return &CollectorClient{
- c: c,
- }
-}
-
-func (p *CollectorClient) Client_() thrift.TClient {
- return p.c
-}
-
-func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta {
- return p.meta
-}
-
-func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
- p.meta = meta
-}
-
-// Parameters:
-// - Batches
-func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) {
- var _args12 CollectorSubmitBatchesArgs
- _args12.Batches = batches
- var _result14 CollectorSubmitBatchesResult
- var _meta13 thrift.ResponseMeta
- _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14)
- p.SetLastResponseMeta_(_meta13)
- if _err != nil {
- return
- }
- return _result14.GetSuccess(), nil
-}
-
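The three client constructors differ only in how the input/output protocols are obtained; the factory variant derives both from a single protocol factory over one transport. A sketch of the call shape, leaving the transport to the caller (in practice this would be a connection to a collector endpoint) and assuming `thrift.NewTBinaryProtocolFactoryConf` from the vendored 0.14 runtime:

```go
package main

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// submit wires a CollectorClient over the given transport and issues one
// submitBatches call. The transport is assumed to be open and connected.
func submit(ctx context.Context, trans thrift.TTransport, batches []*jaeger.Batch) ([]*jaeger.BatchSubmitResponse, error) {
	client := jaeger.NewCollectorClientFactory(trans, thrift.NewTBinaryProtocolFactoryConf(nil))
	return client.SubmitBatches(ctx, batches)
}
```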
-type CollectorProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler Collector
-}
-
-func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewCollectorProcessor(handler Collector) *CollectorProcessor {
-
- self15 := &CollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
- self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler:handler}
-return self15
-}
-
-func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
- if err2 != nil { return false, thrift.WrapTException(err2) }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(ctx, thrift.STRUCT)
- iprot.ReadMessageEnd(ctx)
- x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
- oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
- x16.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, x16
-
-}
-
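On the server side, NewCollectorProcessor maps the wire method name "submitBatches" to a TProcessorFunction that decodes the args, invokes a user-supplied Collector implementation, and encodes the result. A minimal handler sketch, using only symbols defined in this file plus an assumed `thrift-gen/jaeger` package name; `collectorHandler` is hypothetical.

```go
package main

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// collectorHandler is a hypothetical Collector implementation that simply
// acknowledges every batch it receives.
type collectorHandler struct{}

func (collectorHandler) SubmitBatches(ctx context.Context, batches []*jaeger.Batch) ([]*jaeger.BatchSubmitResponse, error) {
	out := make([]*jaeger.BatchSubmitResponse, len(batches))
	for i := range batches {
		out[i] = &jaeger.BatchSubmitResponse{Ok: true}
	}
	return out, nil
}

func main() {
	proc := jaeger.NewCollectorProcessor(collectorHandler{})
	// proc.Process(ctx, iprot, oprot) reads the next message from iprot,
	// dispatches "submitBatches" to the handler above, and writes either a
	// REPLY or an EXCEPTION message to oprot.
	_ = proc
}
```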
-type collectorProcessorSubmitBatches struct {
- handler Collector
-}
-
-func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := CollectorSubmitBatchesArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
- oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- // Start a goroutine to do server side connectivity check.
- if thrift.ServerConnectivityCheckInterval > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
- var tickerCtx context.Context
- tickerCtx, tickerCancel = context.WithCancel(context.Background())
- defer tickerCancel()
- go func(ctx context.Context, cancel context.CancelFunc) {
- ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- if !iprot.Transport().IsOpen() {
- cancel()
- return
- }
- }
- }
- }(tickerCtx, cancel)
- }
-
- result := CollectorSubmitBatchesResult{}
- var retval []*BatchSubmitResponse
- if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil {
- tickerCancel()
- if err2 == thrift.ErrAbandonRequest {
- return false, thrift.WrapTException(err2)
- }
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: " + err2.Error())
- oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return true, thrift.WrapTException(err2)
- } else {
- result.Success = retval
- }
- tickerCancel()
- if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Batches
-type CollectorSubmitBatchesArgs struct {
- Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"`
-}
-
-func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs {
- return &CollectorSubmitBatchesArgs{}
-}
-
-
-func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch {
- return p.Batches
-}
-func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Batch, 0, size)
- p.Batches = tSlice
- for i := 0; i < size; i ++ {
- _elem17 := &Batch{}
- if err := _elem17.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err)
- }
- p.Batches = append(p.Batches, _elem17)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Batches {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) }
- return err
-}
-
-func (p *CollectorSubmitBatchesArgs) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type CollectorSubmitBatchesResult struct {
- Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
-}
-
-func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult {
- return &CollectorSubmitBatchesResult{}
-}
-
-var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse
-
-func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse {
- return p.Success
-}
-func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 0:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField0(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*BatchSubmitResponse, 0, size)
- p.Success = tSlice
- for i := 0; i < size; i ++ {
- _elem18 := &BatchSubmitResponse{}
- if err := _elem18.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
- }
- p.Success = append(p.Success, _elem18)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField0(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Success {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
- }
- return err
-}
-
-func (p *CollectorSubmitBatchesResult) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
-}
-
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go
deleted file mode 100644
index 015ad4b067..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package sampling
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
deleted file mode 100644
index 5cc762824e..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package sampling
-
-import(
- "bytes"
- "context"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-
-func init() {
-}
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go
deleted file mode 100644
index 3bffa5b8ee..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go
+++ /dev/null
@@ -1,1323 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package sampling
-
-import(
- "bytes"
- "context"
- "database/sql/driver"
- "errors"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-type SamplingStrategyType int64
-const (
- SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
- SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
-)
-
-func (p SamplingStrategyType) String() string {
- switch p {
- case SamplingStrategyType_PROBABILISTIC: return "PROBABILISTIC"
- case SamplingStrategyType_RATE_LIMITING: return "RATE_LIMITING"
- }
- return ""
-}
-
-func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
- switch s {
- case "PROBABILISTIC": return SamplingStrategyType_PROBABILISTIC, nil
- case "RATE_LIMITING": return SamplingStrategyType_RATE_LIMITING, nil
- }
- return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
-}
-
-
-func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
-
-func (p SamplingStrategyType) MarshalText() ([]byte, error) {
-return []byte(p.String()), nil
-}
-
-func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
-q, err := SamplingStrategyTypeFromString(string(text))
-if (err != nil) {
-return err
-}
-*p = q
-return nil
-}
-
-func (p *SamplingStrategyType) Scan(value interface{}) error {
-v, ok := value.(int64)
-if !ok {
-return errors.New("Scan value is not int64")
-}
-*p = SamplingStrategyType(v)
-return nil
-}
-
-func (p * SamplingStrategyType) Value() (driver.Value, error) {
- if p == nil {
- return nil, nil
- }
-return int64(*p), nil
-}
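The generated enum implements encoding.TextMarshaler/TextUnmarshaler on top of String and SamplingStrategyTypeFromString, so it serializes by name rather than by ordinal. A small round-trip sketch, assuming the `thrift-gen/sampling` package name from the vendor path:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func main() {
	st := sampling.SamplingStrategyType_PROBABILISTIC
	text, _ := st.MarshalText()
	fmt.Println(string(text)) // PROBABILISTIC

	var parsed sampling.SamplingStrategyType
	if err := parsed.UnmarshalText([]byte("RATE_LIMITING")); err != nil {
		panic(err) // unknown names yield "not a valid SamplingStrategyType string"
	}
	fmt.Println(parsed == sampling.SamplingStrategyType_RATE_LIMITING) // true
}
```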
-// Attributes:
-// - SamplingRate
-type ProbabilisticSamplingStrategy struct {
- SamplingRate float64 `thrift:"samplingRate,1,required" db:"samplingRate" json:"samplingRate"`
-}
-
-func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
- return &ProbabilisticSamplingStrategy{}
-}
-
-
-func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
- return p.SamplingRate
-}
-func (p *ProbabilisticSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetSamplingRate bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.DOUBLE {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetSamplingRate = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetSamplingRate{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"));
- }
- return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.SamplingRate = v
-}
- return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "ProbabilisticSamplingStrategy"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *ProbabilisticSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "samplingRate", thrift.DOUBLE, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) }
- if err := oprot.WriteDouble(ctx, float64(p.SamplingRate)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) }
- return err
-}
-
-func (p *ProbabilisticSamplingStrategy) Equals(other *ProbabilisticSamplingStrategy) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.SamplingRate != other.SamplingRate { return false }
- return true
-}
-
-func (p *ProbabilisticSamplingStrategy) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
-}
-
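Because samplingRate is a required field, Read tracks an isset flag and rejects any encoding that omits it. A sketch of that validation path, feeding Read a struct containing nothing but the field-stop marker; it assumes the same in-memory transport and binary protocol constructors as above.

```go
package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func main() {
	ctx := context.Background()
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolConf(buf, nil)

	// Encode a struct with no fields at all: just the field-stop marker.
	proto.WriteStructBegin(ctx, "ProbabilisticSamplingStrategy")
	proto.WriteFieldStop(ctx)
	proto.WriteStructEnd(ctx)

	s := sampling.NewProbabilisticSamplingStrategy()
	err := s.Read(ctx, proto)
	fmt.Println(err) // a TProtocolException: Required field SamplingRate is not set
}
```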
-// Attributes:
-// - MaxTracesPerSecond
-type RateLimitingSamplingStrategy struct {
- MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" db:"maxTracesPerSecond" json:"maxTracesPerSecond"`
-}
-
-func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
- return &RateLimitingSamplingStrategy{}
-}
-
-
-func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
- return p.MaxTracesPerSecond
-}
-func (p *RateLimitingSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetMaxTracesPerSecond bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I16 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetMaxTracesPerSecond = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetMaxTracesPerSecond{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"));
- }
- return nil
-}
-
-func (p *RateLimitingSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI16(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.MaxTracesPerSecond = v
-}
- return nil
-}
-
-func (p *RateLimitingSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "RateLimitingSamplingStrategy"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *RateLimitingSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "maxTracesPerSecond", thrift.I16, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) }
- if err := oprot.WriteI16(ctx, int16(p.MaxTracesPerSecond)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) }
- return err
-}
-
-func (p *RateLimitingSamplingStrategy) Equals(other *RateLimitingSamplingStrategy) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.MaxTracesPerSecond != other.MaxTracesPerSecond { return false }
- return true
-}
-
-func (p *RateLimitingSamplingStrategy) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
-}
-
-// Attributes:
-// - Operation
-// - ProbabilisticSampling
-type OperationSamplingStrategy struct {
- Operation string `thrift:"operation,1,required" db:"operation" json:"operation"`
- ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" db:"probabilisticSampling" json:"probabilisticSampling"`
-}
-
-func NewOperationSamplingStrategy() *OperationSamplingStrategy {
- return &OperationSamplingStrategy{}
-}
-
-
-func (p *OperationSamplingStrategy) GetOperation() string {
- return p.Operation
-}
-var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
-func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
- if !p.IsSetProbabilisticSampling() {
- return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
- }
-return p.ProbabilisticSampling
-}
-func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
- return p.ProbabilisticSampling != nil
-}
-
-func (p *OperationSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetOperation bool = false;
- var issetProbabilisticSampling bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetOperation = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetProbabilisticSampling = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetOperation{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"));
- }
- if !issetProbabilisticSampling{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"));
- }
- return nil
-}
-
-func (p *OperationSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.Operation = v
-}
- return nil
-}
-
-func (p *OperationSamplingStrategy) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
- if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
- }
- return nil
-}
-
-func (p *OperationSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "OperationSamplingStrategy"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *OperationSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "operation", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.Operation)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) }
- return err
-}
-
-func (p *OperationSamplingStrategy) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) }
- if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) }
- return err
-}
-
-func (p *OperationSamplingStrategy) Equals(other *OperationSamplingStrategy) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Operation != other.Operation { return false }
- if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false }
- return true
-}
-
-func (p *OperationSamplingStrategy) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
-}
-
-// Attributes:
-// - DefaultSamplingProbability
-// - DefaultLowerBoundTracesPerSecond
-// - PerOperationStrategies
-// - DefaultUpperBoundTracesPerSecond
-type PerOperationSamplingStrategies struct {
- DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" db:"defaultSamplingProbability" json:"defaultSamplingProbability"`
- DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" db:"defaultLowerBoundTracesPerSecond" json:"defaultLowerBoundTracesPerSecond"`
- PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" db:"perOperationStrategies" json:"perOperationStrategies"`
- DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" db:"defaultUpperBoundTracesPerSecond" json:"defaultUpperBoundTracesPerSecond,omitempty"`
-}
-
-func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
- return &PerOperationSamplingStrategies{}
-}
-
-
-func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
- return p.DefaultSamplingProbability
-}
-
-func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
- return p.DefaultLowerBoundTracesPerSecond
-}
-
-func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
- return p.PerOperationStrategies
-}
-var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
-func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
- if !p.IsSetDefaultUpperBoundTracesPerSecond() {
- return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
- }
-return *p.DefaultUpperBoundTracesPerSecond
-}
-func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
- return p.DefaultUpperBoundTracesPerSecond != nil
-}
-
-func (p *PerOperationSamplingStrategies) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetDefaultSamplingProbability bool = false;
- var issetDefaultLowerBoundTracesPerSecond bool = false;
- var issetPerOperationStrategies bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.DOUBLE {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetDefaultSamplingProbability = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.DOUBLE {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- issetDefaultLowerBoundTracesPerSecond = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- issetPerOperationStrategies = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.DOUBLE {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetDefaultSamplingProbability{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"));
- }
- if !issetDefaultLowerBoundTracesPerSecond{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"));
- }
- if !issetPerOperationStrategies{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"));
- }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.DefaultSamplingProbability = v
-}
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- p.DefaultLowerBoundTracesPerSecond = v
-}
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*OperationSamplingStrategy, 0, size)
- p.PerOperationStrategies = tSlice
- for i := 0; i < size; i ++ {
- _elem0 := &OperationSamplingStrategy{}
- if err := _elem0.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadDouble(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
-} else {
- p.DefaultUpperBoundTracesPerSecond = &v
-}
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "PerOperationSamplingStrategies"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *PerOperationSamplingStrategies) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) }
- if err := oprot.WriteDouble(ctx, float64(p.DefaultSamplingProbability)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) }
- return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) }
- if err := oprot.WriteDouble(ctx, float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) }
- return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "perOperationStrategies", thrift.LIST, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.PerOperationStrategies {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) }
- return err
-}
-
-func (p *PerOperationSamplingStrategies) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetDefaultUpperBoundTracesPerSecond() {
- if err := oprot.WriteFieldBegin(ctx, "defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) }
- if err := oprot.WriteDouble(ctx, float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) }
- }
- return err
-}
-
-func (p *PerOperationSamplingStrategies) Equals(other *PerOperationSamplingStrategies) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.DefaultSamplingProbability != other.DefaultSamplingProbability { return false }
- if p.DefaultLowerBoundTracesPerSecond != other.DefaultLowerBoundTracesPerSecond { return false }
- if len(p.PerOperationStrategies) != len(other.PerOperationStrategies) { return false }
- for i, _tgt := range p.PerOperationStrategies {
- _src1 := other.PerOperationStrategies[i]
- if !_tgt.Equals(_src1) { return false }
- }
- if p.DefaultUpperBoundTracesPerSecond != other.DefaultUpperBoundTracesPerSecond {
- if p.DefaultUpperBoundTracesPerSecond == nil || other.DefaultUpperBoundTracesPerSecond == nil {
- return false
- }
- if (*p.DefaultUpperBoundTracesPerSecond) != (*other.DefaultUpperBoundTracesPerSecond) { return false }
- }
- return true
-}
-
-func (p *PerOperationSamplingStrategies) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
-}
-
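PerOperationSamplingStrategies nests the probabilistic strategy per operation while keeping service-wide defaults at the top level; only defaultUpperBoundTracesPerSecond is optional. A construction sketch using only the types defined here, with the usual caveat that the `thrift-gen/sampling` package name is inferred from the vendor path:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func main() {
	perOp := &sampling.PerOperationSamplingStrategies{
		DefaultSamplingProbability:       0.001,      // required (field 1)
		DefaultLowerBoundTracesPerSecond: 1.0 / 60.0, // required (field 2)
		PerOperationStrategies: []*sampling.OperationSamplingStrategy{ // required (field 3)
			{
				Operation:             "GET /api/users",
				ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: 0.1},
			},
		},
		// DefaultUpperBoundTracesPerSecond stays nil: optional field 4 is
		// only written when IsSetDefaultUpperBoundTracesPerSecond() is true.
	}
	fmt.Println(perOp.GetDefaultUpperBoundTracesPerSecond()) // 0, the package-level DEFAULT
}
```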
-// Attributes:
-// - StrategyType
-// - ProbabilisticSampling
-// - RateLimitingSampling
-// - OperationSampling
-type SamplingStrategyResponse struct {
- StrategyType SamplingStrategyType `thrift:"strategyType,1,required" db:"strategyType" json:"strategyType"`
- ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" db:"probabilisticSampling" json:"probabilisticSampling,omitempty"`
- RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" db:"rateLimitingSampling" json:"rateLimitingSampling,omitempty"`
- OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" db:"operationSampling" json:"operationSampling,omitempty"`
-}
-
-func NewSamplingStrategyResponse() *SamplingStrategyResponse {
- return &SamplingStrategyResponse{}
-}
-
-
-func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
- return p.StrategyType
-}
-var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
-func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
- if !p.IsSetProbabilisticSampling() {
- return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
- }
-return p.ProbabilisticSampling
-}
-var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
-func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
- if !p.IsSetRateLimitingSampling() {
- return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
- }
-return p.RateLimitingSampling
-}
-var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
-func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
- if !p.IsSetOperationSampling() {
- return SamplingStrategyResponse_OperationSampling_DEFAULT
- }
-return p.OperationSampling
-}
-func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
- return p.ProbabilisticSampling != nil
-}
-
-func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
- return p.RateLimitingSampling != nil
-}
-
-func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
- return p.OperationSampling != nil
-}
-
-func (p *SamplingStrategyResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- var issetStrategyType bool = false;
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetStrategyType = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetStrategyType{
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"));
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- temp := SamplingStrategyType(v)
- p.StrategyType = temp
-}
- return nil
-}
-
-func (p *SamplingStrategyResponse) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
- if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
- if err := p.RateLimitingSampling.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- p.OperationSampling = &PerOperationSamplingStrategies{}
- if err := p.OperationSampling.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
- }
- return nil
-}
-
-func (p *SamplingStrategyResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "SamplingStrategyResponse"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *SamplingStrategyResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "strategyType", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) }
- if err := oprot.WriteI32(ctx, int32(p.StrategyType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) }
- return err
-}
-
-func (p *SamplingStrategyResponse) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetProbabilisticSampling() {
- if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) }
- if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) }
- }
- return err
-}
-
-func (p *SamplingStrategyResponse) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetRateLimitingSampling() {
- if err := oprot.WriteFieldBegin(ctx, "rateLimitingSampling", thrift.STRUCT, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) }
- if err := p.RateLimitingSampling.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) }
- }
- return err
-}
-
-func (p *SamplingStrategyResponse) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetOperationSampling() {
- if err := oprot.WriteFieldBegin(ctx, "operationSampling", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) }
- if err := p.OperationSampling.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) }
- }
- return err
-}
-
-func (p *SamplingStrategyResponse) Equals(other *SamplingStrategyResponse) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.StrategyType != other.StrategyType { return false }
- if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false }
- if !p.RateLimitingSampling.Equals(other.RateLimitingSampling) { return false }
- if !p.OperationSampling.Equals(other.OperationSampling) { return false }
- return true
-}
-
-func (p *SamplingStrategyResponse) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
-}
-
-type SamplingManager interface {
- // Parameters:
- // - ServiceName
- GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error)
-}
-
-type SamplingManagerClient struct {
- c thrift.TClient
- meta thrift.ResponseMeta
-}
-
-func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
- return &SamplingManagerClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
-}
-
-func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
- return &SamplingManagerClient{
- c: thrift.NewTStandardClient(iprot, oprot),
- }
-}
-
-func NewSamplingManagerClient(c thrift.TClient) *SamplingManagerClient {
- return &SamplingManagerClient{
- c: c,
- }
-}
-
-func (p *SamplingManagerClient) Client_() thrift.TClient {
- return p.c
-}
-
-func (p *SamplingManagerClient) LastResponseMeta_() thrift.ResponseMeta {
- return p.meta
-}
-
-func (p *SamplingManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
- p.meta = meta
-}
-
-// Parameters:
-// - ServiceName
-func (p *SamplingManagerClient) GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error) {
- var _args2 SamplingManagerGetSamplingStrategyArgs
- _args2.ServiceName = serviceName
- var _result4 SamplingManagerGetSamplingStrategyResult
- var _meta3 thrift.ResponseMeta
- _meta3, _err = p.Client_().Call(ctx, "getSamplingStrategy", &_args2, &_result4)
- p.SetLastResponseMeta_(_meta3)
- if _err != nil {
- return
- }
- return _result4.GetSuccess(), nil
-}
-
-type SamplingManagerProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler SamplingManager
-}
-
-func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
-
- self5 := &SamplingManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
- self5.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler:handler}
-return self5
-}
-
-func (p *SamplingManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
- if err2 != nil { return false, thrift.WrapTException(err2) }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(ctx, thrift.STRUCT)
- iprot.ReadMessageEnd(ctx)
- x6 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
- oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
- x6.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, x6
-
-}
-
-type samplingManagerProcessorGetSamplingStrategy struct {
- handler SamplingManager
-}
-
-func (p *samplingManagerProcessorGetSamplingStrategy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := SamplingManagerGetSamplingStrategyArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
- oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- // Start a goroutine to do server side connectivity check.
- if thrift.ServerConnectivityCheckInterval > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
- var tickerCtx context.Context
- tickerCtx, tickerCancel = context.WithCancel(context.Background())
- defer tickerCancel()
- go func(ctx context.Context, cancel context.CancelFunc) {
- ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- if !iprot.Transport().IsOpen() {
- cancel()
- return
- }
- }
- }
- }(tickerCtx, cancel)
- }
-
- result := SamplingManagerGetSamplingStrategyResult{}
- var retval *SamplingStrategyResponse
- if retval, err2 = p.handler.GetSamplingStrategy(ctx, args.ServiceName); err2 != nil {
- tickerCancel()
- if err2 == thrift.ErrAbandonRequest {
- return false, thrift.WrapTException(err2)
- }
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: " + err2.Error())
- oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return true, thrift.WrapTException(err2)
- } else {
- result.Success = retval
- }
- tickerCancel()
- if err2 = oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - ServiceName
-type SamplingManagerGetSamplingStrategyArgs struct {
- ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
-}
-
-func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
- return &SamplingManagerGetSamplingStrategyArgs{}
-}
-
-
-func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
- return p.ServiceName
-}
-func (p *SamplingManagerGetSamplingStrategyArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.ServiceName = v
-}
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
- return err
-}
-
-func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type SamplingManagerGetSamplingStrategyResult struct {
- Success *SamplingStrategyResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
-}
-
-func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
- return &SamplingManagerGetSamplingStrategyResult{}
-}
-
-var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
-func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
- if !p.IsSetSuccess() {
- return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
- }
-return p.Success
-}
-func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 0:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField0(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
- p.Success = &SamplingStrategyResponse{}
- if err := p.Success.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
- }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField0(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
- if err := p.Success.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
- }
- return err
-}
-
-func (p *SamplingManagerGetSamplingStrategyResult) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
-}
-
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
deleted file mode 100644
index ebf43018fe..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-var GoUnusedProtection__ int;
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
deleted file mode 100644
index 7a924b9770..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-import(
- "bytes"
- "context"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-const CLIENT_SEND = "cs"
-const CLIENT_RECV = "cr"
-const SERVER_SEND = "ss"
-const SERVER_RECV = "sr"
-const MESSAGE_SEND = "ms"
-const MESSAGE_RECV = "mr"
-const WIRE_SEND = "ws"
-const WIRE_RECV = "wr"
-const CLIENT_SEND_FRAGMENT = "csf"
-const CLIENT_RECV_FRAGMENT = "crf"
-const SERVER_SEND_FRAGMENT = "ssf"
-const SERVER_RECV_FRAGMENT = "srf"
-const LOCAL_COMPONENT = "lc"
-const CLIENT_ADDR = "ca"
-const SERVER_ADDR = "sa"
-const MESSAGE_ADDR = "ma"
-
-func init() {
-}
-
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
deleted file mode 100644
index b00ecd23fc..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
+++ /dev/null
@@ -1,1853 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-import(
- "bytes"
- "context"
- "database/sql/driver"
- "errors"
- "fmt"
- "time"
- "github.com/uber/jaeger-client-go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-type AnnotationType int64
-const (
- AnnotationType_BOOL AnnotationType = 0
- AnnotationType_BYTES AnnotationType = 1
- AnnotationType_I16 AnnotationType = 2
- AnnotationType_I32 AnnotationType = 3
- AnnotationType_I64 AnnotationType = 4
- AnnotationType_DOUBLE AnnotationType = 5
- AnnotationType_STRING AnnotationType = 6
-)
-
-func (p AnnotationType) String() string {
- switch p {
- case AnnotationType_BOOL: return "BOOL"
- case AnnotationType_BYTES: return "BYTES"
- case AnnotationType_I16: return "I16"
- case AnnotationType_I32: return "I32"
- case AnnotationType_I64: return "I64"
- case AnnotationType_DOUBLE: return "DOUBLE"
- case AnnotationType_STRING: return "STRING"
- }
- return ""
-}
-
-func AnnotationTypeFromString(s string) (AnnotationType, error) {
- switch s {
- case "BOOL": return AnnotationType_BOOL, nil
- case "BYTES": return AnnotationType_BYTES, nil
- case "I16": return AnnotationType_I16, nil
- case "I32": return AnnotationType_I32, nil
- case "I64": return AnnotationType_I64, nil
- case "DOUBLE": return AnnotationType_DOUBLE, nil
- case "STRING": return AnnotationType_STRING, nil
- }
- return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
-}
-
-
-func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
-
-func (p AnnotationType) MarshalText() ([]byte, error) {
-return []byte(p.String()), nil
-}
-
-func (p *AnnotationType) UnmarshalText(text []byte) error {
-q, err := AnnotationTypeFromString(string(text))
-if (err != nil) {
-return err
-}
-*p = q
-return nil
-}
-
-func (p *AnnotationType) Scan(value interface{}) error {
-v, ok := value.(int64)
-if !ok {
-return errors.New("Scan value is not int64")
-}
-*p = AnnotationType(v)
-return nil
-}
-
-func (p * AnnotationType) Value() (driver.Value, error) {
- if p == nil {
- return nil, nil
- }
-return int64(*p), nil
-}
-// Indicates the network context of a service recording an annotation with two
-// exceptions.
-//
-// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
-// the endpoint indicates the source or destination of an RPC. This exception
-// allows zipkin to display network context of uninstrumented services, or
-// clients such as web browsers.
-//
-// Attributes:
-// - Ipv4: IPv4 host address packed into 4 bytes.
-//
-// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
-// - Port: IPv4 port
-//
-// Note: this is to be treated as an unsigned integer, so watch for negatives.
-//
-// Conventionally, when the port isn't known, port = 0.
-// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
-//
-// Conventionally, when the service name isn't known, service_name = "unknown".
-// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
-type Endpoint struct {
- Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
- Port int16 `thrift:"port,2" db:"port" json:"port"`
- ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
- Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
-}
-
-func NewEndpoint() *Endpoint {
- return &Endpoint{}
-}
-
-
-func (p *Endpoint) GetIpv4() int32 {
- return p.Ipv4
-}
-
-func (p *Endpoint) GetPort() int16 {
- return p.Port
-}
-
-func (p *Endpoint) GetServiceName() string {
- return p.ServiceName
-}
-var Endpoint_Ipv6_DEFAULT []byte
-
-func (p *Endpoint) GetIpv6() []byte {
- return p.Ipv6
-}
-func (p *Endpoint) IsSetIpv6() bool {
- return p.Ipv6 != nil
-}
-
-func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.I16 {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.Ipv4 = v
-}
- return nil
-}
-
-func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI16(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- p.Port = v
-}
- return nil
-}
-
-func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
-} else {
- p.ServiceName = v
-}
- return nil
-}
-
-func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
-} else {
- p.Ipv6 = v
-}
- return nil
-}
-
-func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) }
- if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) }
- return err
-}
-
-func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) }
- if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) }
- return err
-}
-
-func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) }
- return err
-}
-
-func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetIpv6() {
- if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) }
- if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) }
- }
- return err
-}
-
-func (p *Endpoint) Equals(other *Endpoint) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Ipv4 != other.Ipv4 { return false }
- if p.Port != other.Port { return false }
- if p.ServiceName != other.ServiceName { return false }
- if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { return false }
- return true
-}
-
-func (p *Endpoint) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Endpoint(%+v)", *p)
-}
-
-// An annotation is similar to a log statement. It includes a host field which
-// allows these events to be attributed properly, and also makes them aggregatable.
-//
-// Attributes:
-// - Timestamp: Microseconds from epoch.
-//
-// This value should use the most precise value possible. For example,
-// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
-// - Value
-// - Host: Always the host that recorded the event. By specifying the host you allow
-// rollup of all events (such as client requests to a service) by IP address.
-type Annotation struct {
- Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
- Value string `thrift:"value,2" db:"value" json:"value"`
- Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
-}
-
-func NewAnnotation() *Annotation {
- return &Annotation{}
-}
-
-
-func (p *Annotation) GetTimestamp() int64 {
- return p.Timestamp
-}
-
-func (p *Annotation) GetValue() string {
- return p.Value
-}
-var Annotation_Host_DEFAULT *Endpoint
-func (p *Annotation) GetHost() *Endpoint {
- if !p.IsSetHost() {
- return Annotation_Host_DEFAULT
- }
-return p.Host
-}
-func (p *Annotation) IsSetHost() bool {
- return p.Host != nil
-}
-
-func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.Timestamp = v
-}
- return nil
-}
-
-func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- p.Value = v
-}
- return nil
-}
-
-func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- p.Host = &Endpoint{}
- if err := p.Host.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
- }
- return nil
-}
-
-func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
- return err
-}
-
-func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.Value)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
- return err
-}
-
-func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetHost() {
- if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) }
- if err := p.Host.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) }
- }
- return err
-}
-
-func (p *Annotation) Equals(other *Annotation) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Timestamp != other.Timestamp { return false }
- if p.Value != other.Value { return false }
- if !p.Host.Equals(other.Host) { return false }
- return true
-}
-
-func (p *Annotation) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Annotation(%+v)", *p)
-}
-
-// Binary annotations are tags applied to a Span to give it context. For
-// example, a binary annotation of "http.uri" could be the path to a resource in
-// an RPC call.
-//
-// Binary annotations of type STRING are always queryable, though more a
-// historical implementation detail than a structural concern.
-//
-// Binary annotations can repeat, and vary on the host. Similar to Annotation,
-// the host indicates who logged the event. This allows you to tell the
-// difference between the client and server side of the same key. For example,
-// the key "http.uri" might be different on the client and server side due to
-// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
-// you can see the different points of view, which often help in debugging.
-//
-// Attributes:
-// - Key
-// - Value
-// - AnnotationType
-// - Host: The host that recorded the tag, which allows you to differentiate between
-// multiple tags with the same key. There are two exceptions to this.
-//
-// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
-// destination of an RPC. This exception allows zipkin to display network
-// context of uninstrumented services, or clients such as web browsers.
-type BinaryAnnotation struct {
- Key string `thrift:"key,1" db:"key" json:"key"`
- Value []byte `thrift:"value,2" db:"value" json:"value"`
- AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
- Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"`
-}
-
-func NewBinaryAnnotation() *BinaryAnnotation {
- return &BinaryAnnotation{}
-}
-
-
-func (p *BinaryAnnotation) GetKey() string {
- return p.Key
-}
-
-func (p *BinaryAnnotation) GetValue() []byte {
- return p.Value
-}
-
-func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
- return p.AnnotationType
-}
-var BinaryAnnotation_Host_DEFAULT *Endpoint
-func (p *BinaryAnnotation) GetHost() *Endpoint {
- if !p.IsSetHost() {
- return BinaryAnnotation_Host_DEFAULT
- }
-return p.Host
-}
-func (p *BinaryAnnotation) IsSetHost() bool {
- return p.Host != nil
-}
-
-func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 2:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField2(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.I32 {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.STRUCT {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.Key = v
-}
- return nil
-}
-
-func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(ctx); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
-} else {
- p.Value = v
-}
- return nil
-}
-
-func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
-} else {
- temp := AnnotationType(v)
- p.AnnotationType = temp
-}
- return nil
-}
-
-func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- p.Host = &Endpoint{}
- if err := p.Host.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField2(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
- return err
-}
-
-func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
- if err := oprot.WriteBinary(ctx, p.Value); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
- return err
-}
-
-func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) }
- if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) }
- return err
-}
-
-func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetHost() {
- if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) }
- if err := p.Host.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) }
- }
- return err
-}
-
-func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Key != other.Key { return false }
- if bytes.Compare(p.Value, other.Value) != 0 { return false }
- if p.AnnotationType != other.AnnotationType { return false }
- if !p.Host.Equals(other.Host) { return false }
- return true
-}
-
-func (p *BinaryAnnotation) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
-}
-
-// A trace is a series of spans (often RPC calls) which form a latency tree.
-//
-// The root span is where trace_id = id and parent_id = Nil. The root span is
-// usually the longest interval in the trace, starting with a SERVER_RECV
-// annotation and ending with a SERVER_SEND.
-//
-// Attributes:
-// - TraceID
-// - Name: Span name in lowercase, rpc method for example
-//
-// Conventionally, when the span name isn't known, name = "unknown".
-// - ID
-// - ParentID
-// - Annotations
-// - BinaryAnnotations
-// - Debug
-// - Timestamp: Microseconds from epoch of the creation of this span.
-//
-// This value should be set directly by instrumentation, using the most
-// precise value possible. For example, gettimeofday or syncing nanoTime
-// against a tick of currentTimeMillis.
-//
-// For compatibility with instrumentation that precedes this field, collectors
-// or span stores can derive this via Annotation.timestamp.
-// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
-//
-// This field is optional for compatibility with old data: first-party span
-// stores are expected to support this at time of introduction.
-// - Duration: Measurement of duration in microseconds, used to support queries.
-//
-// This value should be set directly, where possible. Doing so encourages
-// precise measurement decoupled from problems of clocks, such as skew or NTP
-// updates causing time to move backwards.
-//
-// For compatibility with instrumentation that precedes this field, collectors
-// or span stores can derive this by subtracting Annotation.timestamp.
-// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
-//
-// If this field is persisted as unset, zipkin will continue to work, except
-// duration query support will be implementation-specific. Similarly, setting
-// this field non-atomically is implementation-specific.
-//
-// This field is i64 vs i32 to support spans longer than 35 minutes.
-// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non-zero, this
-// means the trace uses 128-bit traceIds instead of 64-bit.
-type Span struct {
- TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
- // unused field # 2
- Name string `thrift:"name,3" db:"name" json:"name"`
- ID int64 `thrift:"id,4" db:"id" json:"id"`
- ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
- Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
- // unused field # 7
- BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
- Debug bool `thrift:"debug,9" db:"debug" json:"debug"`
- Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
- Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
- TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
-}
-
-func NewSpan() *Span {
- return &Span{}
-}
-
-
-func (p *Span) GetTraceID() int64 {
- return p.TraceID
-}
-
-func (p *Span) GetName() string {
- return p.Name
-}
-
-func (p *Span) GetID() int64 {
- return p.ID
-}
-var Span_ParentID_DEFAULT int64
-func (p *Span) GetParentID() int64 {
- if !p.IsSetParentID() {
- return Span_ParentID_DEFAULT
- }
-return *p.ParentID
-}
-
-func (p *Span) GetAnnotations() []*Annotation {
- return p.Annotations
-}
-
-func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
- return p.BinaryAnnotations
-}
-var Span_Debug_DEFAULT bool = false
-
-func (p *Span) GetDebug() bool {
- return p.Debug
-}
-var Span_Timestamp_DEFAULT int64
-func (p *Span) GetTimestamp() int64 {
- if !p.IsSetTimestamp() {
- return Span_Timestamp_DEFAULT
- }
-return *p.Timestamp
-}
-var Span_Duration_DEFAULT int64
-func (p *Span) GetDuration() int64 {
- if !p.IsSetDuration() {
- return Span_Duration_DEFAULT
- }
-return *p.Duration
-}
-var Span_TraceIDHigh_DEFAULT int64
-func (p *Span) GetTraceIDHigh() int64 {
- if !p.IsSetTraceIDHigh() {
- return Span_TraceIDHigh_DEFAULT
- }
-return *p.TraceIDHigh
-}
-func (p *Span) IsSetParentID() bool {
- return p.ParentID != nil
-}
-
-func (p *Span) IsSetDebug() bool {
- return p.Debug != Span_Debug_DEFAULT
-}
-
-func (p *Span) IsSetTimestamp() bool {
- return p.Timestamp != nil
-}
-
-func (p *Span) IsSetDuration() bool {
- return p.Duration != nil
-}
-
-func (p *Span) IsSetTraceIDHigh() bool {
- return p.TraceIDHigh != nil
-}
-
-func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break; }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 3:
- if fieldTypeId == thrift.STRING {
- if err := p.ReadField3(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 4:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField4(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 5:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField5(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 6:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField6(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 8:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField8(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 9:
- if fieldTypeId == thrift.BOOL {
- if err := p.ReadField9(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 10:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField10(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 11:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField11(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- case 12:
- if fieldTypeId == thrift.I64 {
- if err := p.ReadField12(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
-} else {
- p.TraceID = v
-}
- return nil
-}
-
-func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(ctx); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
-} else {
- p.Name = v
-}
- return nil
-}
-
-func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
-} else {
- p.ID = v
-}
- return nil
-}
-
-func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(ctx); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
-} else {
- p.ParentID = &v
-}
- return nil
-}
-
-func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Annotation, 0, size)
- p.Annotations = tSlice
- for i := 0; i < size; i ++ {
- _elem0 := &Annotation{}
- if err := _elem0.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.Annotations = append(p.Annotations, _elem0)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*BinaryAnnotation, 0, size)
- p.BinaryAnnotations = tSlice
- for i := 0; i < size; i ++ {
- _elem1 := &BinaryAnnotation{}
- if err := _elem1.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
- }
- p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
-  v, err := iprot.ReadBool(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading field 9: ", err)
-  }
-  p.Debug = v
-  return nil
-}
-
-func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
-  v, err := iprot.ReadI64(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading field 10: ", err)
-  }
-  p.Timestamp = &v
-  return nil
-}
-
-func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
-  v, err := iprot.ReadI64(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading field 11: ", err)
-  }
-  p.Duration = &v
-  return nil
-}
-
-func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
-  v, err := iprot.ReadI64(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading field 12: ", err)
-  }
-  p.TraceIDHigh = &v
-  return nil
-}
-
-func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- if err := p.writeField3(ctx, oprot); err != nil { return err }
- if err := p.writeField4(ctx, oprot); err != nil { return err }
- if err := p.writeField5(ctx, oprot); err != nil { return err }
- if err := p.writeField6(ctx, oprot); err != nil { return err }
- if err := p.writeField8(ctx, oprot); err != nil { return err }
- if err := p.writeField9(ctx, oprot); err != nil { return err }
- if err := p.writeField10(ctx, oprot); err != nil { return err }
- if err := p.writeField11(ctx, oprot); err != nil { return err }
- if err := p.writeField12(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) }
- return err
-}
-
-func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) }
- if err := oprot.WriteString(ctx, string(p.Name)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) }
- return err
-}
-
-func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) }
- return err
-}
-
-func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetParentID() {
- if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) }
- }
- return err
-}
-
-func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Annotations {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) }
- return err
-}
-
-func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.BinaryAnnotations {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) }
- return err
-}
-
-func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetDebug() {
- if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) }
- if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) }
- }
- return err
-}
-
-func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetTimestamp() {
- if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) }
- }
- return err
-}
-
-func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetDuration() {
- if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) }
- }
- return err
-}
-
-func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetTraceIDHigh() {
- if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) }
- if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) }
- }
- return err
-}
-
-func (p *Span) Equals(other *Span) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.TraceID != other.TraceID { return false }
- if p.Name != other.Name { return false }
- if p.ID != other.ID { return false }
- if p.ParentID != other.ParentID {
- if p.ParentID == nil || other.ParentID == nil {
- return false
- }
- if (*p.ParentID) != (*other.ParentID) { return false }
- }
- if len(p.Annotations) != len(other.Annotations) { return false }
- for i, _tgt := range p.Annotations {
- _src2 := other.Annotations[i]
- if !_tgt.Equals(_src2) { return false }
- }
- if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { return false }
- for i, _tgt := range p.BinaryAnnotations {
- _src3 := other.BinaryAnnotations[i]
- if !_tgt.Equals(_src3) { return false }
- }
- if p.Debug != other.Debug { return false }
- if p.Timestamp != other.Timestamp {
- if p.Timestamp == nil || other.Timestamp == nil {
- return false
- }
- if (*p.Timestamp) != (*other.Timestamp) { return false }
- }
- if p.Duration != other.Duration {
- if p.Duration == nil || other.Duration == nil {
- return false
- }
- if (*p.Duration) != (*other.Duration) { return false }
- }
- if p.TraceIDHigh != other.TraceIDHigh {
- if p.TraceIDHigh == nil || other.TraceIDHigh == nil {
- return false
- }
- if (*p.TraceIDHigh) != (*other.TraceIDHigh) { return false }
- }
- return true
-}
-
-func (p *Span) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-// - Ok
-type Response struct {
- Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
-}
-
-func NewResponse() *Response {
- return &Response{}
-}
-
-
-func (p *Response) GetOk() bool {
- return p.Ok
-}
-func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- issetOk := false
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.BOOL {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- issetOk = true
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- if !issetOk {
-   return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
- }
- return nil
-}
-
-func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  v, err := iprot.ReadBool(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading field 1: ", err)
-  }
-  p.Ok = v
-  return nil
-}
-
-func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "Response"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
- if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
- return err
-}
-
-func (p *Response) Equals(other *Response) bool {
- if p == other {
- return true
- } else if p == nil || other == nil {
- return false
- }
- if p.Ok != other.Ok { return false }
- return true
-}
-
-func (p *Response) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Response(%+v)", *p)
-}
-
-type ZipkinCollector interface {
- // Parameters:
- // - Spans
- SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error)
-}
-
-type ZipkinCollectorClient struct {
- c thrift.TClient
- meta thrift.ResponseMeta
-}
-
-func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
- return &ZipkinCollectorClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
-}
-
-func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
- return &ZipkinCollectorClient{
- c: thrift.NewTStandardClient(iprot, oprot),
- }
-}
-
-func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient {
- return &ZipkinCollectorClient{
- c: c,
- }
-}
-
-func (p *ZipkinCollectorClient) Client_() thrift.TClient {
- return p.c
-}
-
-func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta {
- return p.meta
-}
-
-func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
- p.meta = meta
-}
-
-// Parameters:
-// - Spans
-func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) {
- var _args4 ZipkinCollectorSubmitZipkinBatchArgs
- _args4.Spans = spans
- var _result6 ZipkinCollectorSubmitZipkinBatchResult
- var _meta5 thrift.ResponseMeta
- _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6)
- p.SetLastResponseMeta_(_meta5)
- if _err != nil {
- return
- }
- return _result6.GetSuccess(), nil
-}
-
-type ZipkinCollectorProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler ZipkinCollector
-}
-
-func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
-  self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-  self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
-  return self7
-}
-
-func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
- if err2 != nil { return false, thrift.WrapTException(err2) }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(ctx, thrift.STRUCT)
- iprot.ReadMessageEnd(ctx)
- x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
- oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
- x8.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, x8
-
-}
-
-type zipkinCollectorProcessorSubmitZipkinBatch struct {
- handler ZipkinCollector
-}
-
-func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := ZipkinCollectorSubmitZipkinBatchArgs{}
- var err2 error
- if err2 = args.Read(ctx, iprot); err2 != nil {
- iprot.ReadMessageEnd(ctx)
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
- oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return false, thrift.WrapTException(err2)
- }
- iprot.ReadMessageEnd(ctx)
-
- tickerCancel := func() {}
- // Start a goroutine to do server side connectivity check.
- if thrift.ServerConnectivityCheckInterval > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
- var tickerCtx context.Context
- tickerCtx, tickerCancel = context.WithCancel(context.Background())
- defer tickerCancel()
- go func(ctx context.Context, cancel context.CancelFunc) {
- ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- if !iprot.Transport().IsOpen() {
- cancel()
- return
- }
- }
- }
- }(tickerCtx, cancel)
- }
-
- result := ZipkinCollectorSubmitZipkinBatchResult{}
- var retval []*Response
- if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil {
- tickerCancel()
- if err2 == thrift.ErrAbandonRequest {
- return false, thrift.WrapTException(err2)
- }
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: " + err2.Error())
- oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
- x.Write(ctx, oprot)
- oprot.WriteMessageEnd(ctx)
- oprot.Flush(ctx)
- return true, thrift.WrapTException(err2)
- } else {
- result.Success = retval
- }
- tickerCancel()
- if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = thrift.WrapTException(err2)
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Spans
-type ZipkinCollectorSubmitZipkinBatchArgs struct {
- Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
- return &ZipkinCollectorSubmitZipkinBatchArgs{}
-}
-
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
- return p.Spans
-}
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField1(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Span, 0, size)
- p.Spans = tSlice
- for i := 0; i < size; i++ {
- _elem9 := &Span{}
- if err := _elem9.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
- }
- p.Spans = append(p.Spans, _elem9)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField1(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Spans {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
- return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type ZipkinCollectorSubmitZipkinBatchResult struct {
- Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
- return &ZipkinCollectorSubmitZipkinBatchResult{}
-}
-
-var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
- return p.Success
-}
-func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP { break }
- switch fieldId {
- case 0:
- if fieldTypeId == thrift.LIST {
- if err := p.ReadField0(ctx, iprot); err != nil {
- return err
- }
- } else {
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- default:
- if err := iprot.Skip(ctx, fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin(ctx)
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Response, 0, size)
- p.Success = tSlice
- for i := 0; i < size; i++ {
- _elem10 := &Response{}
- if err := _elem10.Read(ctx, iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
- }
- p.Success = append(p.Success, _elem10)
- }
- if err := iprot.ReadListEnd(ctx); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
- if p != nil {
- if err := p.writeField0(ctx, oprot); err != nil { return err }
- }
- if err := oprot.WriteFieldStop(ctx); err != nil {
- return thrift.PrependError("write field stop error: ", err) }
- if err := oprot.WriteStructEnd(ctx); err != nil {
- return thrift.PrependError("write struct stop error: ", err) }
- return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
- if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Success {
- if err := v.Write(ctx, oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(ctx); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(ctx); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
- }
- return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
-}
-
-
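The generated Read, Write, and Equals methods above are the entire
(de)serialization surface of this file. A minimal round-trip sketch, assuming
the vendored thrift package exposes TMemoryBuffer and NewTBinaryProtocolConf
as in upstream Apache Thrift 0.14.x (import paths are illustrative):

    package main

    import (
        "context"
        "fmt"

        "github.com/uber/jaeger-client-go/thrift"
        "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
    )

    func main() {
        ctx := context.Background()

        // Serialize a Span into an in-memory transport with the binary protocol.
        buf := thrift.NewTMemoryBuffer()
        oprot := thrift.NewTBinaryProtocolConf(buf, nil)
        span := &zipkincore.Span{TraceID: 1, ID: 2, Name: "get /users"}
        if err := span.Write(ctx, oprot); err != nil {
            panic(err)
        }

        // Read it back through the generated Read and compare.
        iprot := thrift.NewTBinaryProtocolConf(buf, nil)
        out := &zipkincore.Span{}
        if err := out.Read(ctx, iprot); err != nil {
            panic(err)
        }
        fmt.Println(out.Equals(span)) // true
    }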
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
deleted file mode 100644
index c4c38ae01a..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Apache Thrift
-
-This is a partial copy of Apache Thrift v0.14.1 (https://github.com/apache/thrift/commit/f6fa1794539e68ac294038ac388d6bde40a6c237).
-
-It is vendored to avoid compatibility issues across Thrift versions.
-
-The file logger.go is modified to remove a dependency on "testing" (see Issue #585).
-
-See:
- * https://github.com/jaegertracing/jaeger-client-go/pull/584
- * https://github.com/jaegertracing/jaeger-client-go/pull/303
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
deleted file mode 100644
index 32d5b0147a..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
-)
-
-const (
- UNKNOWN_APPLICATION_EXCEPTION = 0
- UNKNOWN_METHOD = 1
- INVALID_MESSAGE_TYPE_EXCEPTION = 2
- WRONG_METHOD_NAME = 3
- BAD_SEQUENCE_ID = 4
- MISSING_RESULT = 5
- INTERNAL_ERROR = 6
- PROTOCOL_ERROR = 7
- INVALID_TRANSFORM = 8
- INVALID_PROTOCOL = 9
- UNSUPPORTED_CLIENT_TYPE = 10
-)
-
-var defaultApplicationExceptionMessage = map[int32]string{
- UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception",
- UNKNOWN_METHOD: "unknown method",
- INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
- WRONG_METHOD_NAME: "wrong method name",
- BAD_SEQUENCE_ID: "bad sequence ID",
- MISSING_RESULT: "missing result",
- INTERNAL_ERROR: "unknown internal error",
- PROTOCOL_ERROR: "unknown protocol error",
- INVALID_TRANSFORM: "Invalid transform",
- INVALID_PROTOCOL: "Invalid protocol",
- UNSUPPORTED_CLIENT_TYPE: "Unsupported client type",
-}
-
-// TApplicationException is an application-level Thrift exception.
-type TApplicationException interface {
- TException
- TypeId() int32
- Read(ctx context.Context, iprot TProtocol) error
- Write(ctx context.Context, oprot TProtocol) error
-}
-
-type tApplicationException struct {
- message string
- type_ int32
-}
-
-var _ TApplicationException = (*tApplicationException)(nil)
-
-func (tApplicationException) TExceptionType() TExceptionType {
- return TExceptionTypeApplication
-}
-
-func (e tApplicationException) Error() string {
- if e.message != "" {
- return e.message
- }
- return defaultApplicationExceptionMessage[e.type_]
-}
-
-func NewTApplicationException(type_ int32, message string) TApplicationException {
- return &tApplicationException{message, type_}
-}
-
-func (p *tApplicationException) TypeId() int32 {
- return p.type_
-}
-
-func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
- // TODO: this should really be generated by the compiler
- _, err := iprot.ReadStructBegin(ctx)
- if err != nil {
- return err
- }
-
- message := ""
- type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
-
- for {
- _, ttype, id, err := iprot.ReadFieldBegin(ctx)
- if err != nil {
- return err
- }
- if ttype == STOP {
- break
- }
- switch id {
- case 1:
- if ttype == STRING {
- if message, err = iprot.ReadString(ctx); err != nil {
- return err
- }
- } else {
- if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
- return err
- }
- }
- case 2:
- if ttype == I32 {
- if type_, err = iprot.ReadI32(ctx); err != nil {
- return err
- }
- } else {
- if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
- return err
- }
- }
- default:
- if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
- return err
- }
- }
- if err = iprot.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(ctx); err != nil {
- return err
- }
-
- p.message = message
- p.type_ = type_
-
- return nil
-}
-
-func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
- err = oprot.WriteStructBegin(ctx, "TApplicationException")
- if len(p.Error()) > 0 {
- err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
- if err != nil {
- return
- }
- err = oprot.WriteString(ctx, p.Error())
- if err != nil {
- return
- }
- err = oprot.WriteFieldEnd(ctx)
- if err != nil {
- return
- }
- }
- err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
- if err != nil {
- return
- }
- err = oprot.WriteI32(ctx, p.type_)
- if err != nil {
- return
- }
- err = oprot.WriteFieldEnd(ctx)
- if err != nil {
- return
- }
- err = oprot.WriteFieldStop(ctx)
- if err != nil {
- return
- }
- err = oprot.WriteStructEnd(ctx)
- return
-}
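The generated processor code earlier in this diff shows the canonical use of
this type: on an unknown method or an internal handler error, a
TApplicationException is written back as an EXCEPTION message. A sketch of
that server-side path, assuming ctx, oprot, name, and seqId are in scope as in
ZipkinCollectorProcessor.Process:

    x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
    oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
    x.Write(ctx, oprot)
    oprot.WriteMessageEnd(ctx)
    oprot.Flush(ctx)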
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
deleted file mode 100644
index 45c880d32f..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math"
-)
-
-type TBinaryProtocol struct {
- trans TRichTransport
- origTransport TTransport
- cfg *TConfiguration
- buffer [64]byte
-}
-
-type TBinaryProtocolFactory struct {
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTBinaryProtocolConf instead.
-func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
- return NewTBinaryProtocolConf(t, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// Deprecated: Use NewTBinaryProtocolConf instead.
-func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
- return NewTBinaryProtocolConf(t, &TConfiguration{
- TBinaryStrictRead: &strictRead,
- TBinaryStrictWrite: &strictWrite,
-
- noPropagation: true,
- })
-}
-
-func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
- PropagateTConfiguration(t, conf)
- p := &TBinaryProtocol{
- origTransport: t,
- cfg: conf,
- }
- if et, ok := t.(TRichTransport); ok {
- p.trans = et
- } else {
- p.trans = NewTRichTransport(t)
- }
- return p
-}
-
-// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
-func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
- return NewTBinaryProtocolFactoryConf(&TConfiguration{
- noPropagation: true,
- })
-}
-
-// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
-func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
- return NewTBinaryProtocolFactoryConf(&TConfiguration{
- TBinaryStrictRead: &strictRead,
- TBinaryStrictWrite: &strictWrite,
-
- noPropagation: true,
- })
-}
-
-func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
- return &TBinaryProtocolFactory{
- cfg: conf,
- }
-}
-
-func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
- return NewTBinaryProtocolConf(t, p.cfg)
-}
-
-func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
- p.cfg = conf
-}
-
-/**
- * Writing Methods
- */
-
-func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
- if p.cfg.GetTBinaryStrictWrite() {
- version := uint32(VERSION_1) | uint32(typeId)
- e := p.WriteI32(ctx, int32(version))
- if e != nil {
- return e
- }
- e = p.WriteString(ctx, name)
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, seqId)
- return e
- } else {
- e := p.WriteString(ctx, name)
- if e != nil {
- return e
- }
- e = p.WriteByte(ctx, int8(typeId))
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, seqId)
- return e
- }
-}
-
-func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
- e := p.WriteByte(ctx, int8(typeId))
- if e != nil {
- return e
- }
- e = p.WriteI16(ctx, id)
- return e
-}
-
-func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
- e := p.WriteByte(ctx, STOP)
- return e
-}
-
-func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- e := p.WriteByte(ctx, int8(keyType))
- if e != nil {
- return e
- }
- e = p.WriteByte(ctx, int8(valueType))
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, int32(size))
- return e
-}
-
-func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- e := p.WriteByte(ctx, int8(elemType))
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, int32(size))
- return e
-}
-
-func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- e := p.WriteByte(ctx, int8(elemType))
- if e != nil {
- return e
- }
- e = p.WriteI32(ctx, int32(size))
- return e
-}
-
-func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
- if value {
- return p.WriteByte(ctx, 1)
- }
- return p.WriteByte(ctx, 0)
-}
-
-func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
- e := p.trans.WriteByte(byte(value))
- return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
- v := p.buffer[0:2]
- binary.BigEndian.PutUint16(v, uint16(value))
- _, e := p.trans.Write(v)
- return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
- v := p.buffer[0:4]
- binary.BigEndian.PutUint32(v, uint32(value))
- _, e := p.trans.Write(v)
- return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
- v := p.buffer[0:8]
- binary.BigEndian.PutUint64(v, uint64(value))
- _, err := p.trans.Write(v)
- return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
- return p.WriteI64(ctx, int64(math.Float64bits(value)))
-}
-
-func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
- e := p.WriteI32(ctx, int32(len(value)))
- if e != nil {
- return e
- }
- _, err := p.trans.WriteString(value)
- return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
- e := p.WriteI32(ctx, int32(len(value)))
- if e != nil {
- return e
- }
- _, err := p.trans.Write(value)
- return NewTProtocolException(err)
-}
-
-/**
- * Reading methods
- */
-
-func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
- size, e := p.ReadI32(ctx)
- if e != nil {
- return "", typeId, 0, NewTProtocolException(e)
- }
- if size < 0 {
- typeId = TMessageType(size & 0x0ff)
- version := int64(int64(size) & VERSION_MASK)
- if version != VERSION_1 {
- return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
- }
- name, e = p.ReadString(ctx)
- if e != nil {
- return name, typeId, seqId, NewTProtocolException(e)
- }
- seqId, e = p.ReadI32(ctx)
- if e != nil {
- return name, typeId, seqId, NewTProtocolException(e)
- }
- return name, typeId, seqId, nil
- }
- if p.cfg.GetTBinaryStrictRead() {
- return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
- }
- name, e2 := p.readStringBody(size)
- if e2 != nil {
- return name, typeId, seqId, e2
- }
- b, e3 := p.ReadByte(ctx)
- if e3 != nil {
- return name, typeId, seqId, e3
- }
- typeId = TMessageType(b)
- seqId, e4 := p.ReadI32(ctx)
- if e4 != nil {
- return name, typeId, seqId, e4
- }
- return name, typeId, seqId, nil
-}
-
-func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- return
-}
-
-func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
- t, err := p.ReadByte(ctx)
- typeId = TType(t)
- if err != nil {
- return name, typeId, seqId, err
- }
- if t != STOP {
- seqId, err = p.ReadI16(ctx)
- }
- return name, typeId, seqId, err
-}
-
-func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
- return nil
-}
-
-var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
-
-func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
- k, e := p.ReadByte(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- kType = TType(k)
- v, e := p.ReadByte(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- vType = TType(v)
- size32, e := p.ReadI32(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size32 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size32)
- return kType, vType, size, nil
-}
-
-func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
- b, e := p.ReadByte(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- elemType = TType(b)
- size32, e := p.ReadI32(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size32 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size32)
-
- return
-}
-
-func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
- b, e := p.ReadByte(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- elemType = TType(b)
- size32, e := p.ReadI32(ctx)
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size32 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size32)
- return elemType, size, nil
-}
-
-func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
- b, e := p.ReadByte(ctx)
- v := true
- if b != 1 {
- v = false
- }
- return v, e
-}
-
-func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
- v, err := p.trans.ReadByte()
- return int8(v), err
-}
-
-func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
- buf := p.buffer[0:2]
- err = p.readAll(ctx, buf)
- value = int16(binary.BigEndian.Uint16(buf))
- return value, err
-}
-
-func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
- buf := p.buffer[0:4]
- err = p.readAll(ctx, buf)
- value = int32(binary.BigEndian.Uint32(buf))
- return value, err
-}
-
-func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
- buf := p.buffer[0:8]
- err = p.readAll(ctx, buf)
- value = int64(binary.BigEndian.Uint64(buf))
- return value, err
-}
-
-func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
- buf := p.buffer[0:8]
- err = p.readAll(ctx, buf)
- value = math.Float64frombits(binary.BigEndian.Uint64(buf))
- return value, err
-}
-
-func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
- size, e := p.ReadI32(ctx)
- if e != nil {
- return "", e
- }
- err = checkSizeForProtocol(size, p.cfg)
- if err != nil {
- return
- }
- if size < 0 {
- err = invalidDataLength
- return
- }
- if size == 0 {
- return "", nil
- }
- if size < int32(len(p.buffer)) {
- // Avoid allocation on small reads
- buf := p.buffer[:size]
- read, e := io.ReadFull(p.trans, buf)
- return string(buf[:read]), NewTProtocolException(e)
- }
-
- return p.readStringBody(size)
-}
-
-func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
- size, e := p.ReadI32(ctx)
- if e != nil {
- return nil, e
- }
- if err := checkSizeForProtocol(size, p.cfg); err != nil {
- return nil, err
- }
-
- buf, err := safeReadBytes(size, p.trans)
- return buf, NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
- return NewTProtocolException(p.trans.Flush(ctx))
-}
-
-func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
- return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TBinaryProtocol) Transport() TTransport {
- return p.origTransport
-}
-
-func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
- var read int
- _, deadlineSet := ctx.Deadline()
- for {
- read, err = io.ReadFull(p.trans, buf)
- if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
- // This is I/O timeout without anything read,
- // and we still have time left, keep retrying.
- continue
- }
- // For anything else, don't retry
- break
- }
- return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
- buf, err := safeReadBytes(size, p.trans)
- return string(buf), NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(p.trans, conf)
- PropagateTConfiguration(p.origTransport, conf)
- p.cfg = conf
-}
-
-var (
- _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
- _ TConfigurationSetter = (*TBinaryProtocol)(nil)
-)
-
-// This function is shared between TBinaryProtocol and TCompactProtocol.
-//
-// It tries to read size bytes from trans, in a way that prevents large
-// allocations when size is unreasonably large (usually caused by a malformed message).
-func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
- if size < 0 {
- return nil, nil
- }
-
- buf := new(bytes.Buffer)
- _, err := io.CopyN(buf, trans, int64(size))
- return buf.Bytes(), err
-}
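A small sketch of the framing this protocol emits, assuming TMemoryBuffer from
the same vendored package: WriteFieldBegin writes one type byte followed by a
big-endian int16 field id, so a BOOL field with id 1 is three bytes on the wire.

    buf := thrift.NewTMemoryBuffer()
    p := thrift.NewTBinaryProtocolConf(buf, nil)
    _ = p.WriteFieldBegin(context.Background(), "ok", thrift.BOOL, 1)
    fmt.Printf("% x\n", buf.Bytes()) // 02 00 01 (BOOL = 2, field id = 1)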
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/client.go b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
deleted file mode 100644
index ea2c01fdad..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/client.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package thrift
-
-import (
- "context"
- "fmt"
-)
-
-// ResponseMeta represents the metadata attached to the response.
-type ResponseMeta struct {
- // The headers in the response, if any.
- // If the underlying transport/protocol is not THeader, this will always be nil.
- Headers THeaderMap
-}
-
-type TClient interface {
- Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
-}
-
-type TStandardClient struct {
- seqId int32
- iprot, oprot TProtocol
-}
-
-// NewTStandardClient returns a TStandardClient, which implements TClient
-// using the standard Thrift message format. It is not safe for concurrent use.
-func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
- return &TStandardClient{
- iprot: inputProtocol,
- oprot: outputProtocol,
- }
-}
-
-func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
- // Set headers from context object on THeaderProtocol
- if headerProt, ok := oprot.(*THeaderProtocol); ok {
- headerProt.ClearWriteHeaders()
- for _, key := range GetWriteHeaderList(ctx) {
- if value, ok := GetHeader(ctx, key); ok {
- headerProt.SetWriteHeader(key, value)
- }
- }
- }
-
- if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
- return err
- }
- if err := args.Write(ctx, oprot); err != nil {
- return err
- }
- if err := oprot.WriteMessageEnd(ctx); err != nil {
- return err
- }
- return oprot.Flush(ctx)
-}
-
-func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
- rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
- if err != nil {
- return err
- }
-
- if method != rMethod {
- return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
- } else if seqId != rSeqId {
- return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
- } else if rTypeId == EXCEPTION {
- var exception tApplicationException
- if err := exception.Read(ctx, iprot); err != nil {
- return err
- }
-
- if err := iprot.ReadMessageEnd(ctx); err != nil {
- return err
- }
-
- return &exception
- } else if rTypeId != REPLY {
- return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
- }
-
- if err := result.Read(ctx, iprot); err != nil {
- return err
- }
-
- return iprot.ReadMessageEnd(ctx)
-}
-
-func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
- p.seqId++
- seqId := p.seqId
-
- if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
- return ResponseMeta{}, err
- }
-
- // method is oneway
- if result == nil {
- return ResponseMeta{}, nil
- }
-
- err := p.Recv(ctx, p.iprot, seqId, method, result)
- var headers THeaderMap
- if hp, ok := p.iprot.(*THeaderProtocol); ok {
- headers = hp.transport.readHeaders
- }
- return ResponseMeta{
- Headers: headers,
- }, err
-}
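This is exactly the path the generated ZipkinCollectorClient earlier in this
diff takes: it wraps a TStandardClient and drives Call with the generated
args/result structs. A hedged sketch, with itrans, otrans, and spans standing
in for whatever transports and data the caller provides:

    iprot := thrift.NewTBinaryProtocolConf(itrans, nil)
    oprot := thrift.NewTBinaryProtocolConf(otrans, nil)
    client := zipkincore.NewZipkinCollectorClient(thrift.NewTStandardClient(iprot, oprot))
    responses, err := client.SubmitZipkinBatch(context.Background(), spans)

For oneway methods Call skips Recv entirely, which is why a nil result is
accepted above.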
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
deleted file mode 100644
index a49225dabf..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
+++ /dev/null
@@ -1,865 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math"
-)
-
-const (
- COMPACT_PROTOCOL_ID = 0x082
- COMPACT_VERSION = 1
- COMPACT_VERSION_MASK = 0x1f
- COMPACT_TYPE_MASK = 0x0E0
- COMPACT_TYPE_BITS = 0x07
- COMPACT_TYPE_SHIFT_AMOUNT = 5
-)
-
-type tCompactType byte
-
-const (
- COMPACT_BOOLEAN_TRUE = 0x01
- COMPACT_BOOLEAN_FALSE = 0x02
- COMPACT_BYTE = 0x03
- COMPACT_I16 = 0x04
- COMPACT_I32 = 0x05
- COMPACT_I64 = 0x06
- COMPACT_DOUBLE = 0x07
- COMPACT_BINARY = 0x08
- COMPACT_LIST = 0x09
- COMPACT_SET = 0x0A
- COMPACT_MAP = 0x0B
- COMPACT_STRUCT = 0x0C
-)
-
-var (
- ttypeToCompactType map[TType]tCompactType
-)
-
-func init() {
- ttypeToCompactType = map[TType]tCompactType{
- STOP: STOP,
- BOOL: COMPACT_BOOLEAN_TRUE,
- BYTE: COMPACT_BYTE,
- I16: COMPACT_I16,
- I32: COMPACT_I32,
- I64: COMPACT_I64,
- DOUBLE: COMPACT_DOUBLE,
- STRING: COMPACT_BINARY,
- LIST: COMPACT_LIST,
- SET: COMPACT_SET,
- MAP: COMPACT_MAP,
- STRUCT: COMPACT_STRUCT,
- }
-}
-
-type TCompactProtocolFactory struct {
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTCompactProtocolFactoryConf instead.
-func NewTCompactProtocolFactory() *TCompactProtocolFactory {
- return NewTCompactProtocolFactoryConf(&TConfiguration{
- noPropagation: true,
- })
-}
-
-func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
- return &TCompactProtocolFactory{
- cfg: conf,
- }
-}
-
-func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return NewTCompactProtocolConf(trans, p.cfg)
-}
-
-func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
- p.cfg = conf
-}
-
-type TCompactProtocol struct {
- trans TRichTransport
- origTransport TTransport
-
- cfg *TConfiguration
-
- // Used to keep track of the last field for the current and previous structs,
- // so we can do the delta stuff.
- lastField []int
- lastFieldId int
-
- // If we encounter a boolean field begin, save the TField here so it can
- // have the value incorporated.
- booleanFieldName string
- booleanFieldId int16
- booleanFieldPending bool
-
- // If we read a field header, and it's a boolean field, save the boolean
- // value here so that readBool can use it.
- boolValue bool
- boolValueIsNotNull bool
- buffer [64]byte
-}
-
-// Deprecated: Use NewTCompactProtocolConf instead.
-func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
- return NewTCompactProtocolConf(trans, &TConfiguration{
- noPropagation: true,
- })
-}
-
-func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
- PropagateTConfiguration(trans, conf)
- p := &TCompactProtocol{
- origTransport: trans,
- cfg: conf,
- }
- if et, ok := trans.(TRichTransport); ok {
- p.trans = et
- } else {
- p.trans = NewTRichTransport(trans)
- }
-
- return p
-}
-
-//
-// Public Writing methods.
-//
-
-// Write a message header to the wire. Compact Protocol messages contain the
-// protocol version so we can migrate forwards in the future if need be.
-func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
- err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
- if err != nil {
- return NewTProtocolException(err)
- }
- err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
- if err != nil {
- return NewTProtocolException(err)
- }
- _, err = p.writeVarint32(seqid)
- if err != nil {
- return NewTProtocolException(err)
- }
- e := p.WriteString(ctx, name)
- return e
-
-}
-
-func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
-
-// Write a struct begin. This doesn't actually put anything on the wire. We
-// use it as an opportunity to put special placeholder markers on the field
-// stack so we can get the field id deltas correct.
-func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
- p.lastField = append(p.lastField, p.lastFieldId)
- p.lastFieldId = 0
- return nil
-}
-
-// Write a struct end. This doesn't actually put anything on the wire. We use
-// this as an opportunity to pop the last field from the current struct off
-// of the field stack.
-func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
- if len(p.lastField) <= 0 {
- return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
- }
- p.lastFieldId = p.lastField[len(p.lastField)-1]
- p.lastField = p.lastField[:len(p.lastField)-1]
- return nil
-}
-
-func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
- if typeId == BOOL {
- // we want to possibly include the value, so we'll wait.
- p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
- return nil
- }
- _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
- return NewTProtocolException(err)
-}
-
-// The workhorse of writeFieldBegin. It has the option of doing a
-// 'type override' of the type header. This is used specifically in the
-// boolean field case.
-func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
- // if there's a type override, use that.
- var typeToWrite byte
- if typeOverride == 0xFF {
- typeToWrite = byte(p.getCompactType(typeId))
- } else {
- typeToWrite = typeOverride
- }
- // check if we can use delta encoding for the field id
- fieldId := int(id)
- written := 0
- if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
- // write them together
- err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
- if err != nil {
- return 0, err
- }
- } else {
- // write them separately
- err := p.writeByteDirect(typeToWrite)
- if err != nil {
- return 0, err
- }
- err = p.WriteI16(ctx, id)
- written = 1 + 2
- if err != nil {
- return 0, err
- }
- }
-
- p.lastFieldId = fieldId
- return written, nil
-}
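// Worked example of the delta encoding above (an annotation, not part of the
// vendored file): after a field with id 1, writing an I64 field with id 3
// gives a delta of 2 <= 15, so type and delta pack into a single byte:
// (2 << 4) | COMPACT_I64 = 0x26. A delta of zero or anything over 15 falls
// back to the long form: a plain type byte followed by the zigzag-varint id.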
-
-func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
-
-func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
- err := p.writeByteDirect(STOP)
- return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- if size == 0 {
- err := p.writeByteDirect(0)
- return NewTProtocolException(err)
- }
- _, err := p.writeVarint32(int32(size))
- if err != nil {
- return NewTProtocolException(err)
- }
- err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
- return NewTProtocolException(err)
-}
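-
-// For example (illustrative only): a two-entry map<i32,string> header is the
-// varint size followed by one packed key/value type byte, assuming the usual
-// compact type ids (I32 = 5, BINARY/STRING = 8):
-//
-//	0x02    size varint
-//	0x58    (I32 << 4) | BINARY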
-
-func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
-
-// Write a list header.
-func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- _, err := p.writeCollectionBegin(elemType, size)
- return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
-
-// Write a set header.
-func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- _, err := p.writeCollectionBegin(elemType, size)
- return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
-
-func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
- v := byte(COMPACT_BOOLEAN_FALSE)
- if value {
- v = byte(COMPACT_BOOLEAN_TRUE)
- }
- if p.booleanFieldPending {
- // we haven't written the field header yet
- _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
- p.booleanFieldPending = false
- return NewTProtocolException(err)
- }
- // we're not part of a field, so just write the value.
- err := p.writeByteDirect(v)
- return NewTProtocolException(err)
-}
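-
-// Illustrative example (not in the original source): for a bool field the
-// value is folded into the field header itself, so a true field with id
-// delta 1 is the single byte (1 << 4) | COMPACT_BOOLEAN_TRUE = 0x11, and no
-// separate value byte is written.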
-
-// Write a byte. Nothing to see here!
-func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
- err := p.writeByteDirect(byte(value))
- return NewTProtocolException(err)
-}
-
-// Write an I16 as a zigzag varint.
-func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
- _, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
- return NewTProtocolException(err)
-}
-
-// Write an i32 as a zigzag varint.
-func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
- _, err := p.writeVarint32(p.int32ToZigzag(value))
- return NewTProtocolException(err)
-}
-
-// Write an i64 as a zigzag varint.
-func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
- _, err := p.writeVarint64(p.int64ToZigzag(value))
- return NewTProtocolException(err)
-}
-
-// Write a double to the wire as 8 bytes.
-func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
- buf := p.buffer[0:8]
- binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
- _, err := p.trans.Write(buf)
- return NewTProtocolException(err)
-}
-
-// Write a string to the wire with a varint size preceding.
-func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
- _, e := p.writeVarint32(int32(len(value)))
- if e != nil {
- return NewTProtocolException(e)
- }
- _, e = p.trans.WriteString(value)
- return e
-}
-
-// Write a byte array, using a varint for the size.
-func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
- _, e := p.writeVarint32(int32(len(bin)))
- if e != nil {
- return NewTProtocolException(e)
- }
- if len(bin) > 0 {
- _, e = p.trans.Write(bin)
- return NewTProtocolException(e)
- }
- return nil
-}
-
-//
-// Reading methods.
-//
-
-// Read a message header.
-func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
- var protocolId byte
-
- _, deadlineSet := ctx.Deadline()
- for {
- protocolId, err = p.readByteDirect()
- if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
- // keep retrying I/O timeout errors since we still have
- // time left
- continue
- }
- // For anything else, don't retry
- break
- }
- if err != nil {
- return
- }
-
- if protocolId != COMPACT_PROTOCOL_ID {
- e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
- return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
- }
-
- versionAndType, err := p.readByteDirect()
- if err != nil {
- return
- }
-
- version := versionAndType & COMPACT_VERSION_MASK
- typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
- if version != COMPACT_VERSION {
- e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
- err = NewTProtocolExceptionWithType(BAD_VERSION, e)
- return
- }
- seqId, e := p.readVarint32()
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- name, err = p.ReadString(ctx)
- return
-}
-
-func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
-
-// Read a struct begin. There's nothing on the wire for this, but it is our
-// opportunity to push a new struct begin marker onto the field stack.
-func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- p.lastField = append(p.lastField, p.lastFieldId)
- p.lastFieldId = 0
- return
-}
-
-// Doesn't actually consume any wire data, just removes the last field for
-// this struct from the field stack.
-func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
- // consume the last field we read off the wire.
- if len(p.lastField) <= 0 {
- return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
- }
- p.lastFieldId = p.lastField[len(p.lastField)-1]
- p.lastField = p.lastField[:len(p.lastField)-1]
- return nil
-}
-
-// Read a field header off the wire.
-func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
- t, err := p.readByteDirect()
- if err != nil {
- return
- }
-
- // if it's a stop, then we can return immediately, as the struct is over.
- if (t & 0x0f) == STOP {
- return "", STOP, 0, nil
- }
-
- // mask off the 4 MSB of the type header; they could contain a field id delta.
- modifier := int16((t & 0xf0) >> 4)
- if modifier == 0 {
- // not a delta. look ahead for the zigzag varint field id.
- id, err = p.ReadI16(ctx)
- if err != nil {
- return
- }
- } else {
- // has a delta. add the delta to the last read field id.
- id = int16(p.lastFieldId) + modifier
- }
- typeId, e := p.getTType(tCompactType(t & 0x0f))
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
-
- // if this happens to be a boolean field, the value is encoded in the type
- if p.isBoolType(t) {
- // save the boolean value in a special instance variable.
- p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
- p.boolValueIsNotNull = true
- }
-
- // push the new field onto the field stack so we can keep the deltas going.
- p.lastFieldId = int(id)
- return
-}
-
-func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
-
-// Read a map header off the wire. If the size is zero, skip reading the key
-// and value type. This means that 0-length maps will yield TMaps without the
-// "correct" types.
-func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
- size32, e := p.readVarint32()
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size32 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size32)
-
- keyAndValueType := byte(STOP)
- if size != 0 {
- keyAndValueType, err = p.readByteDirect()
- if err != nil {
- return
- }
- }
- keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
- valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
- return
-}
-
-func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
-
-// Read a list header off the wire. If the list size is 0-14, the size will
-// be packed into the element type header. If it's a longer list, the 4 MSB
-// of the element type header will be 0xF, and a varint will follow with the
-// true size.
-func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
- sizeAndType, err := p.readByteDirect()
- if err != nil {
- return
- }
- size = int((sizeAndType >> 4) & 0x0f)
- if size == 15 {
- size2, e := p.readVarint32()
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- if size2 < 0 {
- err = invalidDataLength
- return
- }
- size = int(size2)
- }
- elemType, e := p.getTType(tCompactType(sizeAndType))
- if e != nil {
- err = NewTProtocolException(e)
- return
- }
- return
-}
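-
-// Worked example (not part of the original source): a list of 3 i32 elements
-// arrives as the single header byte (3 << 4) | 0x05 = 0x35, while a list of
-// 20 i64 elements arrives as 0xF6 (size marker 15 | I64) followed by the
-// varint 0x14 carrying the real size.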
-
-func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
-
-// Read a set header off the wire. If the set size is 0-14, the size will
-// be packed into the element type header. If it's a longer set, the 4 MSB
-// of the element type header will be 0xF, and a varint will follow with the
-// true size.
-func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
- return p.ReadListBegin(ctx)
-}
-
-func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
-
-// Read a boolean off the wire. If this is a boolean field, the value should
-// already have been read during readFieldBegin, so we'll just consume the
-// pre-stored value. Otherwise, read a byte.
-func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
- if p.boolValueIsNotNull {
- p.boolValueIsNotNull = false
- return p.boolValue, nil
- }
- v, err := p.readByteDirect()
- return v == COMPACT_BOOLEAN_TRUE, err
-}
-
-// Read a single byte off the wire. Nothing interesting here.
-func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
- v, err := p.readByteDirect()
- if err != nil {
- return 0, NewTProtocolException(err)
- }
- return int8(v), err
-}
-
-// Read an i16 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
- v, err := p.ReadI32(ctx)
- return int16(v), err
-}
-
-// Read an i32 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
- v, e := p.readVarint32()
- if e != nil {
- return 0, NewTProtocolException(e)
- }
- value = p.zigzagToInt32(v)
- return value, nil
-}
-
-// Read an i64 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
- v, e := p.readVarint64()
- if e != nil {
- return 0, NewTProtocolException(e)
- }
- value = p.zigzagToInt64(v)
- return value, nil
-}
-
-// No magic here - just read a double off the wire.
-func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
- longBits := p.buffer[0:8]
- _, e := io.ReadFull(p.trans, longBits)
- if e != nil {
- return 0.0, NewTProtocolException(e)
- }
- return math.Float64frombits(p.bytesToUint64(longBits)), nil
-}
-
-// Read a string from the wire: a varint length followed by the UTF-8 bytes.
-func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
- length, e := p.readVarint32()
- if e != nil {
- return "", NewTProtocolException(e)
- }
- err = checkSizeForProtocol(length, p.cfg)
- if err != nil {
- return
- }
- if length == 0 {
- return "", nil
- }
- if length < int32(len(p.buffer)) {
- // Avoid allocation on small reads
- buf := p.buffer[:length]
- read, e := io.ReadFull(p.trans, buf)
- return string(buf[:read]), NewTProtocolException(e)
- }
-
- buf, e := safeReadBytes(length, p.trans)
- return string(buf), NewTProtocolException(e)
-}
-
-// Read a []byte from the wire.
-func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
- length, e := p.readVarint32()
- if e != nil {
- return nil, NewTProtocolException(e)
- }
- err = checkSizeForProtocol(length, p.cfg)
- if err != nil {
- return
- }
- if length == 0 {
- return []byte{}, nil
- }
-
- buf, e := safeReadBytes(length, p.trans)
- return buf, NewTProtocolException(e)
-}
-
-func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
- return NewTProtocolException(p.trans.Flush(ctx))
-}
-
-func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
- return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TCompactProtocol) Transport() TTransport {
- return p.origTransport
-}
-
-//
-// Internal writing methods
-//
-
-// Abstract method for writing the start of lists and sets. List and sets on
-// the wire differ only by the type indicator.
-func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
- if size <= 14 {
- return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
- }
- err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
- if err != nil {
- return 0, err
- }
- m, err := p.writeVarint32(int32(size))
- return 1 + m, err
-}
-
-// Write an i32 as a varint. Results in 1-5 bytes on the wire.
-// TODO(pomack): make a permanent buffer like writeVarint64?
-func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
- i32buf := p.buffer[0:5]
- idx := 0
- for {
- if (n & ^0x7F) == 0 {
- i32buf[idx] = byte(n)
- idx++
- break
- }
- i32buf[idx] = byte((n & 0x7F) | 0x80)
- idx++
- // unsigned shift so the sign bit doesn't smear into the next group
- u := uint32(n)
- n = int32(u >> 7)
- }
- return p.trans.Write(i32buf[0:idx])
-}
-
-// Write an i64 as a varint. Results in 1-10 bytes on the wire.
-func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
- varint64out := p.buffer[0:10]
- idx := 0
- for {
- if (n & ^0x7F) == 0 {
- varint64out[idx] = byte(n)
- idx++
- break
- } else {
- varint64out[idx] = byte((n & 0x7F) | 0x80)
- idx++
- u := uint64(n)
- n = int64(u >> 7)
- }
- }
- return p.trans.Write(varint64out[0:idx])
-}
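-
-// Worked example (illustrative): the varint encoding emits 7 bits per byte,
-// least-significant group first, with the high bit flagging continuation.
-// Encoding 300 (0b1_0010_1100) therefore yields two bytes:
-//
-//	0xAC    (300 & 0x7F) | 0x80
-//	0x02    300 >> 7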
-
-// Convert l into a zigzag long. This allows negative numbers to be
-// represented compactly as a varint.
-func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
- return (l << 1) ^ (l >> 63)
-}
-
-// Convert l into a zigzag long. This allows negative numbers to be
-// represented compactly as a varint.
-func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
- return (n << 1) ^ (n >> 31)
-}
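-
-// Worked example (illustrative): zigzag interleaves negative and positive
-// values so small magnitudes stay small on the wire:
-//
-//	 0 -> 0
-//	-1 -> 1
-//	 1 -> 2
-//	-2 -> 3
-//	 2 -> 4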
-
-func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
- binary.LittleEndian.PutUint64(buf, n)
-}
-
-func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
- binary.LittleEndian.PutUint64(buf, uint64(n))
-}
-
-// Writes a byte without any possibility of all that field header nonsense.
-// Used internally by other writing methods that know they need to write a byte.
-func (p *TCompactProtocol) writeByteDirect(b byte) error {
- return p.trans.WriteByte(b)
-}
-
-// Writes a byte without any possibility of all that field header nonsense.
-func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
- return 1, p.writeByteDirect(byte(n))
-}
-
-//
-// Internal reading methods
-//
-
-// Read an i32 from the wire as a varint. The MSB of each byte is set
-// if there is another byte to follow. This can read up to 5 bytes.
-func (p *TCompactProtocol) readVarint32() (int32, error) {
- // if the wire contains the right stuff, this will just truncate the i64 we
- // read and get us the right sign.
- v, err := p.readVarint64()
- return int32(v), err
-}
-
-// Read an i64 from the wire as a proper varint. The MSB of each byte is set
-// if there is another byte to follow. This can read up to 10 bytes.
-func (p *TCompactProtocol) readVarint64() (int64, error) {
- shift := uint(0)
- result := int64(0)
- for {
- b, err := p.readByteDirect()
- if err != nil {
- return 0, err
- }
- result |= int64(b&0x7f) << shift
- if (b & 0x80) != 0x80 {
- break
- }
- shift += 7
- }
- return result, nil
-}
-
-// Read a raw byte, unlike ReadByte which reads a Thrift byte (i8).
-func (p *TCompactProtocol) readByteDirect() (byte, error) {
- return p.trans.ReadByte()
-}
-
-//
-// encoding helpers
-//
-
-// Convert from zigzag int to int.
-func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
- u := uint32(n)
- return int32(u>>1) ^ -(n & 1)
-}
-
-// Convert from zigzag long to long.
-func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
- u := uint64(n)
- return int64(u>>1) ^ -(n & 1)
-}
-
-// bytesToInt64 converts the first 8 bytes of b, interpreted in little-endian
-// order, into an int64.
-func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
- return int64(binary.LittleEndian.Uint64(b))
-}
-
-// bytesToUint64 converts the first 8 bytes of b, interpreted in little-endian
-// order, into a uint64.
-func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
- return binary.LittleEndian.Uint64(b)
-}
-
-//
-// type testing and converting
-//
-
-func (p *TCompactProtocol) isBoolType(b byte) bool {
- return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
-}
-
-// Given a tCompactType constant, convert it to its corresponding
-// TType value.
-func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
- switch byte(t) & 0x0f {
- case STOP:
- return STOP, nil
- case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
- return BOOL, nil
- case COMPACT_BYTE:
- return BYTE, nil
- case COMPACT_I16:
- return I16, nil
- case COMPACT_I32:
- return I32, nil
- case COMPACT_I64:
- return I64, nil
- case COMPACT_DOUBLE:
- return DOUBLE, nil
- case COMPACT_BINARY:
- return STRING, nil
- case COMPACT_LIST:
- return LIST, nil
- case COMPACT_SET:
- return SET, nil
- case COMPACT_MAP:
- return MAP, nil
- case COMPACT_STRUCT:
- return STRUCT, nil
- }
- return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
-}
-
-// Given a TType value, find the appropriate TCompactProtocol.Types constant.
-func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
- return ttypeToCompactType[t]
-}
-
-func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(p.trans, conf)
- PropagateTConfiguration(p.origTransport, conf)
- p.cfg = conf
-}
-
-var (
- _ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
- _ TConfigurationSetter = (*TCompactProtocol)(nil)
-)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
deleted file mode 100644
index 454d9f3774..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "crypto/tls"
- "fmt"
- "time"
-)
-
-// Default TConfiguration values.
-const (
- DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
- DEFAULT_MAX_FRAME_SIZE = 16384000
-
- DEFAULT_TBINARY_STRICT_READ = false
- DEFAULT_TBINARY_STRICT_WRITE = true
-
- DEFAULT_CONNECT_TIMEOUT = 0
- DEFAULT_SOCKET_TIMEOUT = 0
-)
-
-// TConfiguration defines some configurations shared between TTransport,
-// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
-//
-// When constructing TConfiguration, you only need to specify the non-default
-// fields. All zero values have sane default values.
-//
-// Not all configurations defined are applicable to all implementations.
-// Implementations are free to ignore the configurations not applicable to them.
-//
-// All functions attached to this type are nil-safe.
-//
-// See [1] for spec.
-//
-// NOTE: When using TConfiguration, fill in all the configurations you want to
-// set across the stack, not only the ones you want to set in the immediate
-// TTransport/TProtocol.
-//
-// For example, say you want to migrate this old code into using TConfiguration:
-//
-//	socket := thrift.NewTSocketTimeout("host:port", time.Second)
-//	transFactory := thrift.NewTFramedTransportFactoryMaxLength(
-//		thrift.NewTTransportFactory(),
-//		1024 * 1024 * 256,
-//	)
-//	protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
-//
-// This is the wrong way to do it because in the end the TConfiguration used by
-// socket and transFactory will be overwritten by the one used by protoFactory
-// because of TConfiguration propagation:
-//
-//	// bad example, DO NOT USE
-//	socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
-//		ConnectTimeout: time.Second,
-//		SocketTimeout:  time.Second,
-//	})
-//	transFactory := thrift.NewTFramedTransportFactoryConf(
-//		thrift.NewTTransportFactory(),
-//		&thrift.TConfiguration{
-//			MaxFrameSize: 1024 * 1024 * 256,
-//		},
-//	)
-//	protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
-//		TBinaryStrictRead:  thrift.BoolPtr(true),
-//		TBinaryStrictWrite: thrift.BoolPtr(true),
-//	})
-//
-// This is the correct way to do it:
-//
-//	conf := &thrift.TConfiguration{
-//		ConnectTimeout: time.Second,
-//		SocketTimeout:  time.Second,
-//
-//		MaxFrameSize: 1024 * 1024 * 256,
-//
-//		TBinaryStrictRead:  thrift.BoolPtr(true),
-//		TBinaryStrictWrite: thrift.BoolPtr(true),
-//	}
-//	socket := thrift.NewTSocketConf("host:port", conf)
-//	transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
-//	protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
-//
-// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
-type TConfiguration struct {
- // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
- MaxMessageSize int32
-
- // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
- //
- // Also if MaxMessageSize < MaxFrameSize,
- // MaxMessageSize will be used instead.
- MaxFrameSize int32
-
- // Connect and socket timeouts to be used by TSocket and TSSLSocket.
- //
- // 0 means no timeout.
- //
- // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
- // used.
- ConnectTimeout time.Duration
- SocketTimeout time.Duration
-
- // TLS config to be used by TSSLSocket.
- TLSConfig *tls.Config
-
- // Strict read/write configurations for TBinaryProtocol.
- //
- // BoolPtr helper function is available to use literal values.
- TBinaryStrictRead *bool
- TBinaryStrictWrite *bool
-
- // The wrapped protocol id to be used in THeader transport/protocol.
- //
- // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
- // are provided to help filling this value.
- THeaderProtocolID *THeaderProtocolID
-
- // Used internally by deprecated constructors, to avoid overriding
- // underlying TTransport/TProtocol's cfg by accidental propagations.
- //
- // For external users this is always false.
- noPropagation bool
-}
-
-// GetMaxMessageSize returns the max message size an implementation should
-// follow.
-//
-// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
-func (tc *TConfiguration) GetMaxMessageSize() int32 {
- if tc == nil || tc.MaxMessageSize <= 0 {
- return DEFAULT_MAX_MESSAGE_SIZE
- }
- return tc.MaxMessageSize
-}
-
-// GetMaxFrameSize returns the max frame size an implementation should follow.
-//
-// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
-//
-// If the configured max message size is smaller than the configured max frame
-// size, the smaller one will be returned instead.
-func (tc *TConfiguration) GetMaxFrameSize() int32 {
- if tc == nil {
- return DEFAULT_MAX_FRAME_SIZE
- }
- maxFrameSize := tc.MaxFrameSize
- if maxFrameSize <= 0 {
- maxFrameSize = DEFAULT_MAX_FRAME_SIZE
- }
- if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
- return maxMessageSize
- }
- return maxFrameSize
-}
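-
-// For example (illustrative, not from the original source): the frame size is
-// clamped by the message size limit, since a frame can never usefully exceed it:
-//
-//	tc := &TConfiguration{MaxMessageSize: 1 << 20, MaxFrameSize: 16 << 20}
-//	tc.GetMaxFrameSize() // 1 << 20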
-
-// GetConnectTimeout returns the connect timeout that should be used by TSocket
-// and TSSLSocket.
-//
-// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
-func (tc *TConfiguration) GetConnectTimeout() time.Duration {
- if tc == nil || tc.ConnectTimeout < 0 {
- return DEFAULT_CONNECT_TIMEOUT
- }
- return tc.ConnectTimeout
-}
-
-// GetSocketTimeout returns the socket timeout that should be used by TSocket
-// and TSSLSocket.
-//
-// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
-func (tc *TConfiguration) GetSocketTimeout() time.Duration {
- if tc == nil || tc.SocketTimeout < 0 {
- return DEFAULT_SOCKET_TIMEOUT
- }
- return tc.SocketTimeout
-}
-
-// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
-//
-// It's nil-safe. If tc is nil, nil will be returned instead.
-func (tc *TConfiguration) GetTLSConfig() *tls.Config {
- if tc == nil {
- return nil
- }
- return tc.TLSConfig
-}
-
-// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
-// should follow.
-//
-// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
-// tc.TBinaryStrictRead is nil.
-func (tc *TConfiguration) GetTBinaryStrictRead() bool {
- if tc == nil || tc.TBinaryStrictRead == nil {
- return DEFAULT_TBINARY_STRICT_READ
- }
- return *tc.TBinaryStrictRead
-}
-
-// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol
-// should follow.
-//
-// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
-// tc.TBinaryStrictWrite is nil.
-func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
- if tc == nil || tc.TBinaryStrictWrite == nil {
- return DEFAULT_TBINARY_STRICT_WRITE
- }
- return *tc.TBinaryStrictWrite
-}
-
-// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
-// THeaderProtocol clients (for servers, the protocol id set by the client is
-// always used instead).
-//
-// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
-// THeaderProtocolDefault will be returned instead.
-// THeaderProtocolDefault will also be returned if the configured value is invalid.
-func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
- if tc == nil || tc.THeaderProtocolID == nil {
- return THeaderProtocolDefault
- }
- protoID := *tc.THeaderProtocolID
- if err := protoID.Validate(); err != nil {
- return THeaderProtocolDefault
- }
- return protoID
-}
-
-// THeaderProtocolIDPtr validates and returns the pointer to id.
-//
-// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
-// and the validation error will be returned.
-func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
- err := id.Validate()
- if err != nil {
- id = THeaderProtocolDefault
- }
- return &id, err
-}
-
-// THeaderProtocolIDPtrMust validates and returns the pointer to id.
-//
-// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
-// instead of returning them.
-func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
- ptr, err := THeaderProtocolIDPtr(id)
- if err != nil {
- panic(err)
- }
- return ptr
-}
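-
-// A typical use (illustrative, not from the original source): filling
-// TConfiguration.THeaderProtocolID with a literal value, panicking at init
-// time if the id is unsupported:
-//
-//	conf := &TConfiguration{
-//		THeaderProtocolID: THeaderProtocolIDPtrMust(THeaderProtocolCompact),
-//	}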
-
-// TConfigurationSetter is an optional interface TProtocol, TTransport,
-// TProtocolFactory, TTransportFactory, and other implementations can implement.
-//
-// It's intended to be called during initializations.
-// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
-// middle of a message is undefined:
-// It may or may not change the behavior of the current processing message,
-// and it may even cause the current message to fail.
-//
-// Note for implementations: SetTConfiguration might be called multiple times
-// with the same value in quick successions due to the implementation of the
-// propagation. Implementations should make SetTConfiguration as simple as
-// possible (usually just overwrite the stored configuration and propagate it to
-// the wrapped TTransports/TProtocols).
-type TConfigurationSetter interface {
- SetTConfiguration(*TConfiguration)
-}
-
-// PropagateTConfiguration propagates cfg to impl if impl implements
-// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
-//
-// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
-// with everything being default value, use &TConfiguration{} explicitly instead.
-func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
- if cfg == nil || cfg.noPropagation {
- return
- }
-
- if setter, ok := impl.(TConfigurationSetter); ok {
- setter.SetTConfiguration(cfg)
- }
-}
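-
-// A minimal usage sketch (illustrative; trans and proto are placeholders for
-// whatever TTransport/TProtocol values the caller holds):
-//
-//	cfg := &TConfiguration{MaxMessageSize: 1024 * 1024}
-//	PropagateTConfiguration(trans, cfg)
-//	PropagateTConfiguration(proto, cfg) // no-op unless proto implements TConfigurationSetter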
-
-func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
- if size < 0 {
- return NewTProtocolExceptionWithType(
- NEGATIVE_SIZE,
- fmt.Errorf("negative size: %d", size),
- )
- }
- if size > cfg.GetMaxMessageSize() {
- return NewTProtocolExceptionWithType(
- SIZE_LIMIT,
- fmt.Errorf("size exceeded max allowed: %d", size),
- )
- }
- return nil
-}
-
-type tTransportFactoryConf struct {
- delegate TTransportFactory
- cfg *TConfiguration
-}
-
-func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
- trans, err := f.delegate.GetTransport(orig)
- if err == nil {
- PropagateTConfiguration(orig, f.cfg)
- PropagateTConfiguration(trans, f.cfg)
- }
- return trans, err
-}
-
-func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(f.delegate, f.cfg)
- f.cfg = cfg
-}
-
-// TTransportFactoryConf wraps a TTransportFactory to propagate
-// TConfiguration on the factory's GetTransport calls.
-func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
- return &tTransportFactoryConf{
- delegate: delegate,
- cfg: conf,
- }
-}
-
-type tProtocolFactoryConf struct {
- delegate TProtocolFactory
- cfg *TConfiguration
-}
-
-func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
- proto := f.delegate.GetProtocol(trans)
- PropagateTConfiguration(trans, f.cfg)
- PropagateTConfiguration(proto, f.cfg)
- return proto
-}
-
-func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(f.delegate, f.cfg)
- f.cfg = cfg
-}
-
-// TProtocolFactoryConf wraps a TProtocolFactory to propagate
-// TConfiguration on the factory's GetProtocol calls.
-func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
- return &tProtocolFactoryConf{
- delegate: delegate,
- cfg: conf,
- }
-}
-
-var (
- _ TConfigurationSetter = (*tTransportFactoryConf)(nil)
- _ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
-)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/context.go b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
deleted file mode 100644
index d15c1bcf89..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/context.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import "context"
-
-var defaultCtx = context.Background()
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
deleted file mode 100644
index 53bf862ea5..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
-)
-
-// Generic Thrift exception
-type TException interface {
- error
-
- TExceptionType() TExceptionType
-}
-
-// Prepends additional information to an error without losing the Thrift exception interface
-func PrependError(prepend string, err error) error {
- msg := prepend + err.Error()
-
- var te TException
- if errors.As(err, &te) {
- switch te.TExceptionType() {
- case TExceptionTypeTransport:
- if t, ok := err.(TTransportException); ok {
- return prependTTransportException(prepend, t)
- }
- case TExceptionTypeProtocol:
- if t, ok := err.(TProtocolException); ok {
- return prependTProtocolException(prepend, t)
- }
- case TExceptionTypeApplication:
- var t TApplicationException
- if errors.As(err, &t) {
- return NewTApplicationException(t.TypeId(), msg)
- }
- }
-
- return wrappedTException{
- err: err,
- msg: msg,
- tExceptionType: te.TExceptionType(),
- }
- }
-
- return errors.New(msg)
-}
-
-// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
-type TExceptionType byte
-
-// TExceptionType values
-const (
- TExceptionTypeUnknown TExceptionType = iota
- TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler
- TExceptionTypeApplication // TApplicationExceptions
- TExceptionTypeProtocol // TProtocolExceptions
- TExceptionTypeTransport // TTransportExceptions
-)
-
-// WrapTException wraps an error into TException.
-//
-// If err is nil or already a TException, it's returned as-is.
-// Otherwise it will be wrapped into a TException with TExceptionType() returning
-// TExceptionTypeUnknown, and Unwrap() returning the original error.
-func WrapTException(err error) TException {
- if err == nil {
- return nil
- }
-
- if te, ok := err.(TException); ok {
- return te
- }
-
- return wrappedTException{
- err: err,
- msg: err.Error(),
- tExceptionType: TExceptionTypeUnknown,
- }
-}
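-
-// Usage sketch (illustrative, not from the original source): wrapping a plain
-// error keeps it reachable via errors.Is/errors.As through the Unwrap method
-// below:
-//
-//	te := WrapTException(io.ErrUnexpectedEOF)
-//	_ = te.TExceptionType()                // TExceptionTypeUnknown
-//	_ = errors.Is(te, io.ErrUnexpectedEOF) // true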
-
-type wrappedTException struct {
- err error
- msg string
- tExceptionType TExceptionType
-}
-
-func (w wrappedTException) Error() string {
- return w.msg
-}
-
-func (w wrappedTException) TExceptionType() TExceptionType {
- return w.tExceptionType
-}
-
-func (w wrappedTException) Unwrap() error {
- return w.err
-}
-
-var _ TException = wrappedTException{}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
deleted file mode 100644
index ca25568823..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
-)
-
-// See https://pkg.go.dev/context#WithValue on why we need the unexported typedefs.
-type (
- headerKey string
- headerKeyList int
-)
-
-// Values for headerKeyList.
-const (
- headerKeyListRead headerKeyList = iota
- headerKeyListWrite
-)
-
-// SetHeader sets a header in the context.
-func SetHeader(ctx context.Context, key, value string) context.Context {
- return context.WithValue(
- ctx,
- headerKey(key),
- value,
- )
-}
-
-// UnsetHeader unsets a previously set header in the context.
-func UnsetHeader(ctx context.Context, key string) context.Context {
- return context.WithValue(
- ctx,
- headerKey(key),
- nil,
- )
-}
-
-// GetHeader returns a value of the given header from the context.
-func GetHeader(ctx context.Context, key string) (value string, ok bool) {
- if v := ctx.Value(headerKey(key)); v != nil {
- value, ok = v.(string)
- }
- return
-}
-
-// SetReadHeaderList sets the key list of read THeaders in the context.
-func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
- return context.WithValue(
- ctx,
- headerKeyListRead,
- keys,
- )
-}
-
-// GetReadHeaderList returns the key list of read THeaders from the context.
-func GetReadHeaderList(ctx context.Context) []string {
- if v := ctx.Value(headerKeyListRead); v != nil {
- if value, ok := v.([]string); ok {
- return value
- }
- }
- return nil
-}
-
-// SetWriteHeaderList sets the key list of THeaders to write in the context.
-func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
- return context.WithValue(
- ctx,
- headerKeyListWrite,
- keys,
- )
-}
-
-// GetWriteHeaderList returns the key list of THeaders to write from the context.
-func GetWriteHeaderList(ctx context.Context) []string {
- if v := ctx.Value(headerKeyListWrite); v != nil {
- if value, ok := v.([]string); ok {
- return value
- }
- }
- return nil
-}
-
-// AddReadTHeaderToContext adds the whole THeader headers into context.
-func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
- keys := make([]string, 0, len(headers))
- for key, value := range headers {
- ctx = SetHeader(ctx, key, value)
- keys = append(keys, key)
- }
- return SetReadHeaderList(ctx, keys)
-}
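-
-// Putting the helpers above together (illustrative example, not from the
-// original source):
-//
-//	ctx := SetHeader(context.Background(), "tenant", "acme")
-//	ctx = SetWriteHeaderList(ctx, []string{"tenant"})
-//	if v, ok := GetHeader(ctx, "tenant"); ok {
-//		// v == "acme"
-//	}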
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
deleted file mode 100644
index 878041f8df..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "errors"
-)
-
-// THeaderProtocol is a thrift protocol that implements THeader:
-// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
-//
-// It supports either binary or compact protocol as the wrapped protocol.
-//
-// Most of the THeader handlings are happening inside THeaderTransport.
-type THeaderProtocol struct {
- transport *THeaderTransport
-
- // Will be initialized on first read/write.
- protocol TProtocol
-
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTHeaderProtocolConf instead.
-func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
- return newTHeaderProtocolConf(trans, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
-// transport with given TConfiguration.
-//
-// The passed in transport will be wrapped with THeaderTransport.
-//
-// Note that THeaderTransport handles framing and zlib by itself,
-// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
-// instead of rich transports like TZlibTransport or TFramedTransport.
-func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
- return newTHeaderProtocolConf(trans, conf)
-}
-
-func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
- t := NewTHeaderTransportConf(trans, cfg)
- p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
- PropagateTConfiguration(p, cfg)
- return &THeaderProtocol{
- transport: t,
- protocol: p,
- cfg: cfg,
- }
-}
-
-type tHeaderProtocolFactory struct {
- cfg *TConfiguration
-}
-
-func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return newTHeaderProtocolConf(trans, f.cfg)
-}
-
-func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
- f.cfg = cfg
-}
-
-// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
-func NewTHeaderProtocolFactory() TProtocolFactory {
- return NewTHeaderProtocolFactoryConf(&TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
-// TConfiguration.
-func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
- return tHeaderProtocolFactory{
- cfg: conf,
- }
-}
-
-// Transport returns the underlying transport.
-//
-// It's guaranteed to be of type *THeaderTransport.
-func (p *THeaderProtocol) Transport() TTransport {
- return p.transport
-}
-
-// GetReadHeaders returns the THeaderMap read from transport.
-func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
- return p.transport.GetReadHeaders()
-}
-
-// SetWriteHeader sets a header for write.
-func (p *THeaderProtocol) SetWriteHeader(key, value string) {
- p.transport.SetWriteHeader(key, value)
-}
-
-// ClearWriteHeaders clears all write headers previously set.
-func (p *THeaderProtocol) ClearWriteHeaders() {
- p.transport.ClearWriteHeaders()
-}
-
-// AddTransform add a transform for writing.
-func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
- return p.transport.AddTransform(transform)
-}
-
-func (p *THeaderProtocol) Flush(ctx context.Context) error {
- return p.transport.Flush(ctx)
-}
-
-func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
- newProto, err := p.transport.Protocol().GetProtocol(p.transport)
- if err != nil {
- return err
- }
- PropagateTConfiguration(newProto, p.cfg)
- p.protocol = newProto
- p.transport.SequenceID = seqID
- return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
-}
-
-func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
- if err := p.protocol.WriteMessageEnd(ctx); err != nil {
- return err
- }
- return p.transport.Flush(ctx)
-}
-
-func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
- return p.protocol.WriteStructBegin(ctx, name)
-}
-
-func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
- return p.protocol.WriteStructEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
- return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
-}
-
-func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
- return p.protocol.WriteFieldEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
- return p.protocol.WriteFieldStop(ctx)
-}
-
-func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
-}
-
-func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
- return p.protocol.WriteMapEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- return p.protocol.WriteListBegin(ctx, elemType, size)
-}
-
-func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
- return p.protocol.WriteListEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- return p.protocol.WriteSetBegin(ctx, elemType, size)
-}
-
-func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
- return p.protocol.WriteSetEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
- return p.protocol.WriteBool(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
- return p.protocol.WriteByte(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
- return p.protocol.WriteI16(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
- return p.protocol.WriteI32(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
- return p.protocol.WriteI64(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
- return p.protocol.WriteDouble(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
- return p.protocol.WriteString(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
- return p.protocol.WriteBinary(ctx, value)
-}
-
-// ReadFrame calls underlying THeaderTransport's ReadFrame function.
-func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
- return p.transport.ReadFrame(ctx)
-}
-
-func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
- if err = p.transport.ReadFrame(ctx); err != nil {
- return
- }
-
- var newProto TProtocol
- newProto, err = p.transport.Protocol().GetProtocol(p.transport)
- if err != nil {
- var tAppExc TApplicationException
- if !errors.As(err, &tAppExc) {
- return
- }
- if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
- return
- }
- if e := tAppExc.Write(ctx, p.protocol); e != nil {
- return
- }
- if e := p.protocol.WriteMessageEnd(ctx); e != nil {
- return
- }
- if e := p.transport.Flush(ctx); e != nil {
- return
- }
- return
- }
- PropagateTConfiguration(newProto, p.cfg)
- p.protocol = newProto
-
- return p.protocol.ReadMessageBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
- return p.protocol.ReadMessageEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- return p.protocol.ReadStructBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
- return p.protocol.ReadStructEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
- return p.protocol.ReadFieldBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
- return p.protocol.ReadFieldEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
- return p.protocol.ReadMapBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
- return p.protocol.ReadMapEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
- return p.protocol.ReadListBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
- return p.protocol.ReadListEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
- return p.protocol.ReadSetBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
- return p.protocol.ReadSetEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
- return p.protocol.ReadBool(ctx)
-}
-
-func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
- return p.protocol.ReadByte(ctx)
-}
-
-func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
- return p.protocol.ReadI16(ctx)
-}
-
-func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
- return p.protocol.ReadI32(ctx)
-}
-
-func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
- return p.protocol.ReadI64(ctx)
-}
-
-func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
- return p.protocol.ReadDouble(ctx)
-}
-
-func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
- return p.protocol.ReadString(ctx)
-}
-
-func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
- return p.protocol.ReadBinary(ctx)
-}
-
-func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
- return p.protocol.Skip(ctx, fieldType)
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(p.transport, cfg)
- PropagateTConfiguration(p.protocol, cfg)
- p.cfg = cfg
-}
-
-var (
- _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
- _ TConfigurationSetter = (*THeaderProtocol)(nil)
-)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
deleted file mode 100644
index f5736df427..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
+++ /dev/null
@@ -1,810 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bufio"
- "bytes"
- "compress/zlib"
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
-)
-
-// Size in bytes for 32-bit ints.
-const size32 = 4
-
-type headerMeta struct {
- MagicFlags uint32
- SequenceID int32
- HeaderLength uint16
-}
-
-const headerMetaSize = 10
-
-type clientType int
-
-const (
- clientUnknown clientType = iota
- clientHeaders
- clientFramedBinary
- clientUnframedBinary
- clientFramedCompact
- clientUnframedCompact
-)
-
-// Constants defined in THeader format:
-// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
-const (
- THeaderHeaderMagic uint32 = 0x0fff0000
- THeaderHeaderMask uint32 = 0xffff0000
- THeaderFlagsMask uint32 = 0x0000ffff
- THeaderMaxFrameSize uint32 = 0x3fffffff
-)
-
-// THeaderMap is the type of the header map in THeader transport.
-type THeaderMap map[string]string
-
-// THeaderProtocolID is the wrapped protocol id used in THeader.
-type THeaderProtocolID int32
-
-// Supported THeaderProtocolID values.
-const (
- THeaderProtocolBinary THeaderProtocolID = 0x00
- THeaderProtocolCompact THeaderProtocolID = 0x02
- THeaderProtocolDefault = THeaderProtocolBinary
-)
-
-// Declared globally to avoid repetitive allocations, not really used.
-var globalMemoryBuffer = NewTMemoryBuffer()
-
-// Validate checks whether the THeaderProtocolID is a valid/supported one.
-func (id THeaderProtocolID) Validate() error {
- _, err := id.GetProtocol(globalMemoryBuffer)
- return err
-}
-
-// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
-func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
- switch id {
- default:
- return nil, NewTApplicationException(
- INVALID_PROTOCOL,
- fmt.Sprintf("THeader protocol id %d not supported", id),
- )
- case THeaderProtocolBinary:
- return NewTBinaryProtocolTransport(trans), nil
- case THeaderProtocolCompact:
- return NewTCompactProtocol(trans), nil
- }
-}
-
-// THeaderTransformID defines the numeric id of the transform used.
-type THeaderTransformID int32
-
-// THeaderTransformID values.
-//
-// Values not defined here are not currently supported, namely HMAC and Snappy.
-const (
- TransformNone THeaderTransformID = iota // 0, no special handling
- TransformZlib // 1, zlib
-)
-
-var supportedTransformIDs = map[THeaderTransformID]bool{
- TransformNone: true,
- TransformZlib: true,
-}
-
-// TransformReader is an io.ReadCloser that handles transforms reading.
-type TransformReader struct {
- io.Reader
-
- closers []io.Closer
-}
-
-var _ io.ReadCloser = (*TransformReader)(nil)
-
-// NewTransformReaderWithCapacity initializes a TransformReader with expected
-// closers capacity.
-//
-// If you don't know the closers capacity beforehand, using
-//
-//	&TransformReader{Reader: baseReader}
-//
-// directly is sufficient.
-func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
- return &TransformReader{
- Reader: baseReader,
- closers: make([]io.Closer, 0, capacity),
- }
-}
-
-// Close calls the underlying closers in appropriate order,
-// stops at and returns the first error encountered.
-func (tr *TransformReader) Close() error {
- // Call closers in reversed order
- for i := len(tr.closers) - 1; i >= 0; i-- {
- if err := tr.closers[i].Close(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// AddTransform adds a transform.
-func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
- switch id {
- default:
- return NewTApplicationException(
- INVALID_TRANSFORM,
- fmt.Sprintf("THeaderTransformID %d not supported", id),
- )
- case TransformNone:
- // no-op
- case TransformZlib:
- readCloser, err := zlib.NewReader(tr.Reader)
- if err != nil {
- return err
- }
- tr.Reader = readCloser
- tr.closers = append(tr.closers, readCloser)
- }
- return nil
-}
-
-// TransformWriter is an io.WriteCloser that handles transforms writing.
-type TransformWriter struct {
- io.Writer
-
- closers []io.Closer
-}
-
-var _ io.WriteCloser = (*TransformWriter)(nil)
-
-// NewTransformWriter creates a new TransformWriter with base writer and transforms.
-func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
- writer := &TransformWriter{
- Writer: baseWriter,
- closers: make([]io.Closer, 0, len(transforms)),
- }
- for _, id := range transforms {
- if err := writer.AddTransform(id); err != nil {
- return nil, err
- }
- }
- return writer, nil
-}
-
-// Close calls the underlying closers in appropriate order,
-// stops at and returns the first error encountered.
-func (tw *TransformWriter) Close() error {
- // Call closers in reversed order
- for i := len(tw.closers) - 1; i >= 0; i-- {
- if err := tw.closers[i].Close(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// AddTransform adds a transform.
-func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
- switch id {
- default:
- return NewTApplicationException(
- INVALID_TRANSFORM,
- fmt.Sprintf("THeaderTransformID %d not supported", id),
- )
- case TransformNone:
- // no-op
- case TransformZlib:
- writeCloser := zlib.NewWriter(tw.Writer)
- tw.Writer = writeCloser
- tw.closers = append(tw.closers, writeCloser)
- }
- return nil
-}
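-
-// Sketch of chaining a zlib transform (illustrative; baseWriter and payload
-// are placeholders): bytes written to the returned writer are compressed
-// before they reach baseWriter, and Close flushes the zlib stream:
-//
-//	w, err := NewTransformWriter(baseWriter, []THeaderTransformID{TransformZlib})
-//	if err == nil {
-//		_, _ = w.Write(payload)
-//		w.Close()
-//	}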
-
-// THeaderInfoType is the type id of the info headers.
-type THeaderInfoType int32
-
-// Supported THeaderInfoType values.
-const (
- _ THeaderInfoType = iota // Skip 0
- InfoKeyValue // 1
- // Rest of the info types are not supported.
-)
-
-// THeaderTransport is a Transport mode that implements THeader.
-//
-// Note that THeaderTransport handles framing and zlib by itself,
-// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
-// instead of rich transports like TZlibTransport or TFramedTransport.
-type THeaderTransport struct {
- SequenceID int32
- Flags uint32
-
- transport TTransport
-
- // THeaderMap for read and write
- readHeaders THeaderMap
- writeHeaders THeaderMap
-
- // Reading related variables.
- reader *bufio.Reader
- // When frame is detected, we read the frame fully into frameBuffer.
- frameBuffer bytes.Buffer
- // When it's non-nil, Read should read from frameReader instead of
- // reader, and an EOF error indicates the end of the frame instead of
- // the end of the whole transport.
- frameReader io.ReadCloser
-
- // Writing related variables
- writeBuffer bytes.Buffer
- writeTransforms []THeaderTransformID
-
- clientType clientType
- protocolID THeaderProtocolID
- cfg *TConfiguration
-
- // buffer is used in the following scenarios to avoid repetitive
- // allocations, while 4 is big enough for all those scenarios:
- //
- // * header padding (max size 4)
- // * write the frame size (size 4)
- buffer [4]byte
-}
-
-var _ TTransport = (*THeaderTransport)(nil)
-
-// Deprecated: Use NewTHeaderTransportConf instead.
-func NewTHeaderTransport(trans TTransport) *THeaderTransport {
- return NewTHeaderTransportConf(trans, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTHeaderTransportConf creates THeaderTransport from the
-// underlying transport, with given TConfiguration attached.
-//
-// If trans is already a *THeaderTransport, it will be returned as is,
-// but with TConfiguration overridden by the value passed in.
-//
-// The protocol ID in TConfiguration is only useful for client transports.
-// For servers,
-// the protocol ID will be overridden again to the one set by the client,
-// to ensure that servers always speak the same dialect as the client.
-func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
- if ht, ok := trans.(*THeaderTransport); ok {
- ht.SetTConfiguration(conf)
- return ht
- }
- PropagateTConfiguration(trans, conf)
- return &THeaderTransport{
- transport: trans,
- reader: bufio.NewReader(trans),
- writeHeaders: make(THeaderMap),
- protocolID: conf.GetTHeaderProtocolID(),
- cfg: conf,
- }
-}
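
A minimal construction sketch (imports elided; the exported MaxFrameSize field on TConfiguration is assumed from the config struct, and TMemoryBuffer stands in for a raw socket transport):

// In real use trans would be a TSocket/TSSLSocket.
trans := NewTMemoryBuffer()
ht := NewTHeaderTransportConf(trans, &TConfiguration{
    MaxFrameSize: 1 << 20, // cap incoming frames at 1 MiB
})
ht.SetWriteHeader("client-id", "example") // sent with the next Flush
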
-
-// Open calls the underlying transport's Open function.
-func (t *THeaderTransport) Open() error {
- return t.transport.Open()
-}
-
-// IsOpen calls the underlying transport's IsOpen function.
-func (t *THeaderTransport) IsOpen() bool {
- return t.transport.IsOpen()
-}
-
-// ReadFrame tries to read the frame header, guess the client type, and handle
-// unframed clients.
-func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
- if !t.needReadFrame() {
- // No need to read frame, skipping.
- return nil
- }
-
- // Peek and handle the first 32 bits.
- // They could either be the length field of a framed message,
- // or the first bytes of an unframed message.
- var buf []byte
- var err error
- // This is also usually the first read from a connection,
- // so handle retries around socket timeouts.
- _, deadlineSet := ctx.Deadline()
- for {
- buf, err = t.reader.Peek(size32)
- if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
- // This is an I/O timeout and we still have time,
- // so keep trying.
- continue
- }
- // For anything else, do not retry
- break
- }
- if err != nil {
- return err
- }
-
- frameSize := binary.BigEndian.Uint32(buf)
- if frameSize&VERSION_MASK == VERSION_1 {
- t.clientType = clientUnframedBinary
- return nil
- }
- if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
- t.clientType = clientUnframedCompact
- return nil
- }
-
- // At this point it should be a framed message,
- // sanity check on frameSize then discard the peeked part.
- if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
- return NewTProtocolExceptionWithType(
- SIZE_LIMIT,
- errors.New("frame too large"),
- )
- }
- t.reader.Discard(size32)
-
- // Read the frame fully into frameBuffer.
- _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
- if err != nil {
- return err
- }
- t.frameReader = ioutil.NopCloser(&t.frameBuffer)
-
- // Peek and handle the next 32 bits.
- buf = t.frameBuffer.Bytes()[:size32]
- version := binary.BigEndian.Uint32(buf)
- if version&THeaderHeaderMask == THeaderHeaderMagic {
- t.clientType = clientHeaders
- return t.parseHeaders(ctx, frameSize)
- }
- if version&VERSION_MASK == VERSION_1 {
- t.clientType = clientFramedBinary
- return nil
- }
- if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
- t.clientType = clientFramedCompact
- return nil
- }
- if err := t.endOfFrame(); err != nil {
- return err
- }
- return NewTProtocolExceptionWithType(
- NOT_IMPLEMENTED,
- errors.New("unsupported client transport type"),
- )
-}
-
-// endOfFrame does end of frame handling.
-//
-// It closes frameReader, and also resets frame related states.
-func (t *THeaderTransport) endOfFrame() error {
- defer func() {
- t.frameBuffer.Reset()
- t.frameReader = nil
- }()
- return t.frameReader.Close()
-}
-
-func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
- if t.clientType != clientHeaders {
- return nil
- }
-
- var err error
- var meta headerMeta
- if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
- return err
- }
- frameSize -= headerMetaSize
- t.Flags = meta.MagicFlags & THeaderFlagsMask
- t.SequenceID = meta.SequenceID
- headerLength := int64(meta.HeaderLength) * 4
- if int64(frameSize) < headerLength {
- return NewTProtocolExceptionWithType(
- SIZE_LIMIT,
- errors.New("header size is larger than the whole frame"),
- )
- }
- headerBuf := NewTMemoryBuffer()
- _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
- if err != nil {
- return err
- }
- hp := NewTCompactProtocol(headerBuf)
- hp.SetTConfiguration(t.cfg)
-
- // At this point the header is already read into headerBuf,
- // and t.frameBuffer starts from the actual payload.
- protoID, err := hp.readVarint32()
- if err != nil {
- return err
- }
- t.protocolID = THeaderProtocolID(protoID)
-
- var transformCount int32
- transformCount, err = hp.readVarint32()
- if err != nil {
- return err
- }
- if transformCount > 0 {
- reader := NewTransformReaderWithCapacity(
- &t.frameBuffer,
- int(transformCount),
- )
- t.frameReader = reader
- transformIDs := make([]THeaderTransformID, transformCount)
- for i := 0; i < int(transformCount); i++ {
- id, err := hp.readVarint32()
- if err != nil {
- return err
- }
- transformIDs[i] = THeaderTransformID(id)
- }
- // The transform IDs were written to the wire in the order they were
- // applied, so on the reading side we apply them in reverse order.
- for i := transformCount - 1; i >= 0; i-- {
- id := transformIDs[i]
- if err := reader.AddTransform(id); err != nil {
- return err
- }
- }
- }
-
- // The info part does not use the transforms yet, so it's
- // important to continue using headerBuf.
- headers := make(THeaderMap)
- for {
- infoType, err := hp.readVarint32()
- if errors.Is(err, io.EOF) {
- break
- }
- if err != nil {
- return err
- }
- if THeaderInfoType(infoType) == InfoKeyValue {
- count, err := hp.readVarint32()
- if err != nil {
- return err
- }
- for i := 0; i < int(count); i++ {
- key, err := hp.ReadString(ctx)
- if err != nil {
- return err
- }
- value, err := hp.ReadString(ctx)
- if err != nil {
- return err
- }
- headers[key] = value
- }
- } else {
- // Skip reading info section on the first
- // unsupported info type.
- break
- }
- }
- t.readHeaders = headers
-
- return nil
-}
-
-func (t *THeaderTransport) needReadFrame() bool {
- if t.clientType == clientUnknown {
- // This is a new connection that's never read before.
- return true
- }
- if t.isFramed() && t.frameReader == nil {
- // We just finished the last frame.
- return true
- }
- return false
-}
-
-func (t *THeaderTransport) Read(p []byte) (read int, err error) {
- // Using context.Background here instead of a passed-in context is safe.
- // First, there's no way to pass a context into this function. Second,
- // in the vast majority of calls the frame has already been read into
- // frameReader by the time Read is called, so ReadFrame here mostly
- // guards against bugs where Read is called without a prior ReadFrame.
- err = t.ReadFrame(context.Background())
- if err != nil {
- return
- }
- if t.frameReader != nil {
- read, err = t.frameReader.Read(p)
- if err == nil && t.frameBuffer.Len() <= 0 {
- // the last Read finished the frame, do endOfFrame
- // handling here.
- err = t.endOfFrame()
- } else if errors.Is(err, io.EOF) {
- err = t.endOfFrame()
- if err != nil {
- return
- }
- if read == 0 {
- // Try to read the next frame when we hit EOF
- // (end of frame) immediately.
- // When we got here, it means the last read
- // finished the previous frame, but didn't
- // do endOfFrame handling yet.
- // We have to read the next frame here,
- // as otherwise we would return 0 and nil,
- // which is a case not handled well by most
- // protocol implementations.
- return t.Read(p)
- }
- }
- return
- }
- return t.reader.Read(p)
-}
-
-// Write writes data to the write buffer.
-//
-// You need to call Flush to actually write them to the transport.
-func (t *THeaderTransport) Write(p []byte) (int, error) {
- return t.writeBuffer.Write(p)
-}
-
-// Flush writes the appropriate header and the write buffer to the underlying transport.
-func (t *THeaderTransport) Flush(ctx context.Context) error {
- if t.writeBuffer.Len() == 0 {
- return nil
- }
-
- defer t.writeBuffer.Reset()
-
- switch t.clientType {
- default:
- fallthrough
- case clientUnknown:
- t.clientType = clientHeaders
- fallthrough
- case clientHeaders:
- headers := NewTMemoryBuffer()
- hp := NewTCompactProtocol(headers)
- hp.SetTConfiguration(t.cfg)
- if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- for _, transform := range t.writeTransforms {
- if _, err := hp.writeVarint32(int32(transform)); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
- if len(t.writeHeaders) > 0 {
- if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- for key, value := range t.writeHeaders {
- if err := hp.WriteString(ctx, key); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if err := hp.WriteString(ctx, value); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
- }
- padding := 4 - headers.Len()%4
- if padding < 4 {
- buf := t.buffer[:padding]
- for i := range buf {
- buf[i] = 0
- }
- if _, err := headers.Write(buf); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
-
- var payload bytes.Buffer
- meta := headerMeta{
- MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
- SequenceID: t.SequenceID,
- HeaderLength: uint16(headers.Len() / 4),
- }
- if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if _, err := io.Copy(&payload, headers); err != nil {
- return NewTTransportExceptionFromError(err)
- }
-
- writer, err := NewTransformWriter(&payload, t.writeTransforms)
- if err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- if err := writer.Close(); err != nil {
- return NewTTransportExceptionFromError(err)
- }
-
- // First write frame length
- buf := t.buffer[:size32]
- binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
- if _, err := t.transport.Write(buf); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- // Then write the payload
- if _, err := io.Copy(t.transport, &payload); err != nil {
- return NewTTransportExceptionFromError(err)
- }
-
- case clientFramedBinary, clientFramedCompact:
- buf := t.buffer[:size32]
- binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
- if _, err := t.transport.Write(buf); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- fallthrough
- case clientUnframedBinary, clientUnframedCompact:
- if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
- return NewTTransportExceptionFromError(err)
- }
- }
-
- select {
- default:
- case <-ctx.Done():
- return NewTTransportExceptionFromError(ctx.Err())
- }
-
- return t.transport.Flush(ctx)
-}
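
Pieced together from the Flush implementation above, a THeader frame has the following on-wire layout (sizes in bytes):

    4    frame length, big endian (counts everything after this field)
    4    MagicFlags: THeader magic in the high 16 bits, flags in the low 16
    4    sequence ID
    2    header block length, in 4-byte words
    var  header block: protocol ID, transform IDs, then info key/values,
         all compact-protocol varints, zero-padded to a multiple of 4
    var  payload, with the write transforms applied

For example, a 10-byte header block receives 2 padding bytes, so HeaderLength is written as 3.
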
-
-// Close closes the transport, along with its underlying transport.
-func (t *THeaderTransport) Close() error {
- if err := t.Flush(context.Background()); err != nil {
- return err
- }
- return t.transport.Close()
-}
-
-// RemainingBytes calls underlying transport's RemainingBytes.
-//
-// Even in framed cases, the compression transforms that may be involved make
-// the remaining frame size differ from the actual number of readable bytes,
-// so we don't bother tracking the remaining frame size ourselves and just use
-// the underlying transport's RemainingBytes directly.
-func (t *THeaderTransport) RemainingBytes() uint64 {
- return t.transport.RemainingBytes()
-}
-
-// GetReadHeaders returns the THeaderMap read from transport.
-func (t *THeaderTransport) GetReadHeaders() THeaderMap {
- return t.readHeaders
-}
-
-// SetWriteHeader sets a header for write.
-func (t *THeaderTransport) SetWriteHeader(key, value string) {
- t.writeHeaders[key] = value
-}
-
-// ClearWriteHeaders clears all write headers previously set.
-func (t *THeaderTransport) ClearWriteHeaders() {
- t.writeHeaders = make(THeaderMap)
-}
-
-// AddTransform adds a transform for writing.
-func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
- if !supportedTransformIDs[transform] {
- return NewTProtocolExceptionWithType(
- NOT_IMPLEMENTED,
- fmt.Errorf("THeaderTransformID %d not supported", transform),
- )
- }
- t.writeTransforms = append(t.writeTransforms, transform)
- return nil
-}
-
-// Protocol returns the wrapped protocol id used in this THeaderTransport.
-func (t *THeaderTransport) Protocol() THeaderProtocolID {
- switch t.clientType {
- default:
- return t.protocolID
- case clientFramedBinary, clientUnframedBinary:
- return THeaderProtocolBinary
- case clientFramedCompact, clientUnframedCompact:
- return THeaderProtocolCompact
- }
-}
-
-func (t *THeaderTransport) isFramed() bool {
- switch t.clientType {
- default:
- return false
- case clientHeaders, clientFramedBinary, clientFramedCompact:
- return true
- }
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(t.transport, cfg)
- t.cfg = cfg
-}
-
-// THeaderTransportFactory is a TTransportFactory implementation to create
-// THeaderTransport.
-//
-// It also implements TConfigurationSetter.
-type THeaderTransportFactory struct {
- // The underlying factory; can be nil.
- Factory TTransportFactory
-
- cfg *TConfiguration
-}
-
-// Deprecated: Use NewTHeaderTransportFactoryConf instead.
-func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
- return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
- noPropagation: true,
- })
-}
-
-// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
-// the given *TConfiguration.
-func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
- return &THeaderTransportFactory{
- Factory: factory,
-
- cfg: conf,
- }
-}
-
-// GetTransport implements TTransportFactory.
-func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- if f.Factory != nil {
- t, err := f.Factory.GetTransport(trans)
- if err != nil {
- return nil, err
- }
- return NewTHeaderTransportConf(t, f.cfg), nil
- }
- return NewTHeaderTransportConf(trans, f.cfg), nil
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
- PropagateTConfiguration(f.Factory, f.cfg)
- f.cfg = cfg
-}
-
-var (
- _ TConfigurationSetter = (*THeaderTransportFactory)(nil)
- _ TConfigurationSetter = (*THeaderTransport)(nil)
-)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/logger.go b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
deleted file mode 100644
index 50d44ec8ea..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "log"
- "os"
-)
-
-// Logger is a simple wrapper of a logging function.
-//
-// In reality, users might use different logging libraries, and those
-// libraries are not always compatible with each other.
-//
-// Logger is meant to be a simple common ground that whatever logging
-// library is in use can easily be wrapped into.
-//
-// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
-// discussion behind it.
-type Logger func(msg string)
-
-// NopLogger is a Logger implementation that does nothing.
-func NopLogger(msg string) {}
-
-// StdLogger wraps stdlib log package into a Logger.
-//
-// If the logger passed in is nil, it falls back to stderr with default flags.
-func StdLogger(logger *log.Logger) Logger {
- if logger == nil {
- logger = log.New(os.Stderr, "", log.LstdFlags)
- }
- return func(msg string) {
- logger.Print(msg)
- }
-}
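
As a sketch, adapting other logging destinations to Logger is typically a one-line closure; for example (testingLogger is a hypothetical helper, import "testing" assumed):

func testingLogger(t *testing.T) Logger {
    return func(msg string) { t.Log(msg) }
}
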
-
-func fallbackLogger(logger Logger) Logger {
- if logger == nil {
- return StdLogger(nil)
- }
- return logger
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
deleted file mode 100644
index 5936d27303..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bytes"
- "context"
-)
-
-// TMemoryBuffer is a memory buffer-based implementation of the TTransport interface.
-type TMemoryBuffer struct {
- *bytes.Buffer
- size int
-}
-
-type TMemoryBufferTransportFactory struct {
- size int
-}
-
-func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- if trans != nil {
- t, ok := trans.(*TMemoryBuffer)
- if ok && t.size > 0 {
- return NewTMemoryBufferLen(t.size), nil
- }
- }
- return NewTMemoryBufferLen(p.size), nil
-}
-
-func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
- return &TMemoryBufferTransportFactory{size: size}
-}
-
-func NewTMemoryBuffer() *TMemoryBuffer {
- return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
-}
-
-func NewTMemoryBufferLen(size int) *TMemoryBuffer {
- buf := make([]byte, 0, size)
- return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
-}
-
-func (p *TMemoryBuffer) IsOpen() bool {
- return true
-}
-
-func (p *TMemoryBuffer) Open() error {
- return nil
-}
-
-func (p *TMemoryBuffer) Close() error {
- p.Buffer.Reset()
- return nil
-}
-
-// Flushing a memory buffer is a no-op.
-func (p *TMemoryBuffer) Flush(ctx context.Context) error {
- return nil
-}
-
-func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
- return uint64(p.Buffer.Len())
-}
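
A minimal usage sketch (imports elided): because TMemoryBuffer embeds *bytes.Buffer, writes are immediately readable and Flush is a no-op.

buf := NewTMemoryBufferLen(64)
buf.WriteString("payload") // *bytes.Buffer method; its error is always nil
got := make([]byte, buf.Len())
if _, err := io.ReadFull(buf, got); err != nil {
    // handle err
}
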
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
deleted file mode 100644
index 25ab2e98a2..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Message type constants in the Thrift protocol.
-type TMessageType int32
-
-const (
- INVALID_TMESSAGE_TYPE TMessageType = 0
- CALL TMessageType = 1
- REPLY TMessageType = 2
- EXCEPTION TMessageType = 3
- ONEWAY TMessageType = 4
-)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
deleted file mode 100644
index e4512d204c..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "math"
- "strconv"
-)
-
-type Numeric interface {
- Int64() int64
- Int32() int32
- Int16() int16
- Byte() byte
- Int() int
- Float64() float64
- Float32() float32
- String() string
- isNull() bool
-}
-
-type numeric struct {
- iValue int64
- dValue float64
- sValue string
- isNil bool
-}
-
-var (
- INFINITY Numeric
- NEGATIVE_INFINITY Numeric
- NAN Numeric
- ZERO Numeric
- NUMERIC_NULL Numeric
-)
-
-func NewNumericFromDouble(dValue float64) Numeric {
- if math.IsInf(dValue, 1) {
- return INFINITY
- }
- if math.IsInf(dValue, -1) {
- return NEGATIVE_INFINITY
- }
- if math.IsNaN(dValue) {
- return NAN
- }
- iValue := int64(dValue)
- sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
- isNil := false
- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromI64(iValue int64) Numeric {
- dValue := float64(iValue)
- sValue := strconv.FormatInt(iValue, 10)
- isNil := false
- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromI32(iValue int32) Numeric {
- dValue := float64(iValue)
- sValue := strconv.FormatInt(int64(iValue), 10)
- isNil := false
- return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromString(sValue string) Numeric {
- if sValue == INFINITY.String() {
- return INFINITY
- }
- if sValue == NEGATIVE_INFINITY.String() {
- return NEGATIVE_INFINITY
- }
- if sValue == NAN.String() {
- return NAN
- }
- iValue, _ := strconv.ParseInt(sValue, 10, 64)
- dValue, _ := strconv.ParseFloat(sValue, 64)
- isNil := len(sValue) == 0
- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
- if isNull {
- return NewNullNumeric()
- }
- if sValue == JSON_INFINITY {
- return INFINITY
- }
- if sValue == JSON_NEGATIVE_INFINITY {
- return NEGATIVE_INFINITY
- }
- if sValue == JSON_NAN {
- return NAN
- }
- iValue, _ := strconv.ParseInt(sValue, 10, 64)
- dValue, _ := strconv.ParseFloat(sValue, 64)
- return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
-}
-
-func NewNullNumeric() Numeric {
- return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
-}
-
-func (p *numeric) Int64() int64 {
- return p.iValue
-}
-
-func (p *numeric) Int32() int32 {
- return int32(p.iValue)
-}
-
-func (p *numeric) Int16() int16 {
- return int16(p.iValue)
-}
-
-func (p *numeric) Byte() byte {
- return byte(p.iValue)
-}
-
-func (p *numeric) Int() int {
- return int(p.iValue)
-}
-
-func (p *numeric) Float64() float64 {
- return p.dValue
-}
-
-func (p *numeric) Float32() float32 {
- return float32(p.dValue)
-}
-
-func (p *numeric) String() string {
- return p.sValue
-}
-
-func (p *numeric) isNull() bool {
- return p.isNil
-}
-
-func init() {
- INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
- NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
- NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
- ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
- NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
-}
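
One behavior worth noting in a quick in-package sketch: the string constructor discards parse errors, so the integer view of a non-integer string is silently zero.

n := NewNumericFromString("3.14")
fmt.Println(n.Float64()) // 3.14
fmt.Println(n.Int64())   // 0 — strconv.ParseInt failed and the error was dropped
fmt.Println(NewNumericFromString("").isNull()) // true (empty string means null)
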
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
deleted file mode 100644
index 245a3ccfc9..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import "context"
-
-// TProcessor is a generic object which operates upon an input stream and
-// writes to some output stream.
-type TProcessor interface {
- Process(ctx context.Context, in, out TProtocol) (bool, TException)
-
- // ProcessorMap returns a map of thrift method names to TProcessorFunctions.
- ProcessorMap() map[string]TProcessorFunction
-
- // AddToProcessorMap adds the given TProcessorFunction to the internal
- // processor map at the given key.
- //
- // If one is already set at the given key, it will be replaced with the new
- // TProcessorFunction.
- AddToProcessorMap(string, TProcessorFunction)
-}
-
-type TProcessorFunction interface {
- Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
-}
-
-// The default processor factory just returns a singleton
-// instance.
-type TProcessorFactory interface {
- GetProcessor(trans TTransport) TProcessor
-}
-
-type tProcessorFactory struct {
- processor TProcessor
-}
-
-func NewTProcessorFactory(p TProcessor) TProcessorFactory {
- return &tProcessorFactory{processor: p}
-}
-
-func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
- return p.processor
-}
-
-// The default processor function factory just returns a singleton
-// instance.
-type TProcessorFunctionFactory interface {
- GetProcessorFunction(trans TTransport) TProcessorFunction
-}
-
-type tProcessorFunctionFactory struct {
- processor TProcessorFunction
-}
-
-func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
- return &tProcessorFunctionFactory{processor: p}
-}
-
-func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
- return p.processor
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
deleted file mode 100644
index 0a69bd4162..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "errors"
- "fmt"
-)
-
-const (
- VERSION_MASK = 0xffff0000
- VERSION_1 = 0x80010000
-)
-
-type TProtocol interface {
- WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
- WriteMessageEnd(ctx context.Context) error
- WriteStructBegin(ctx context.Context, name string) error
- WriteStructEnd(ctx context.Context) error
- WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
- WriteFieldEnd(ctx context.Context) error
- WriteFieldStop(ctx context.Context) error
- WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
- WriteMapEnd(ctx context.Context) error
- WriteListBegin(ctx context.Context, elemType TType, size int) error
- WriteListEnd(ctx context.Context) error
- WriteSetBegin(ctx context.Context, elemType TType, size int) error
- WriteSetEnd(ctx context.Context) error
- WriteBool(ctx context.Context, value bool) error
- WriteByte(ctx context.Context, value int8) error
- WriteI16(ctx context.Context, value int16) error
- WriteI32(ctx context.Context, value int32) error
- WriteI64(ctx context.Context, value int64) error
- WriteDouble(ctx context.Context, value float64) error
- WriteString(ctx context.Context, value string) error
- WriteBinary(ctx context.Context, value []byte) error
-
- ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
- ReadMessageEnd(ctx context.Context) error
- ReadStructBegin(ctx context.Context) (name string, err error)
- ReadStructEnd(ctx context.Context) error
- ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
- ReadFieldEnd(ctx context.Context) error
- ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
- ReadMapEnd(ctx context.Context) error
- ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
- ReadListEnd(ctx context.Context) error
- ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
- ReadSetEnd(ctx context.Context) error
- ReadBool(ctx context.Context) (value bool, err error)
- ReadByte(ctx context.Context) (value int8, err error)
- ReadI16(ctx context.Context) (value int16, err error)
- ReadI32(ctx context.Context) (value int32, err error)
- ReadI64(ctx context.Context) (value int64, err error)
- ReadDouble(ctx context.Context) (value float64, err error)
- ReadString(ctx context.Context) (value string, err error)
- ReadBinary(ctx context.Context) (value []byte, err error)
-
- Skip(ctx context.Context, fieldType TType) (err error)
- Flush(ctx context.Context) (err error)
-
- Transport() TTransport
-}
-
-// The maximum recursion depth the Skip function will traverse.
-const DEFAULT_RECURSION_DEPTH = 64
-
-// Skips over the next data element from the provided input TProtocol object.
-func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
- return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
-}
-
-// Skips over the next data element from the provided input TProtocol object.
-func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
-
- if maxDepth <= 0 {
- return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
- }
-
- switch fieldType {
- case BOOL:
- _, err = self.ReadBool(ctx)
- return
- case BYTE:
- _, err = self.ReadByte(ctx)
- return
- case I16:
- _, err = self.ReadI16(ctx)
- return
- case I32:
- _, err = self.ReadI32(ctx)
- return
- case I64:
- _, err = self.ReadI64(ctx)
- return
- case DOUBLE:
- _, err = self.ReadDouble(ctx)
- return
- case STRING:
- _, err = self.ReadString(ctx)
- return
- case STRUCT:
- if _, err = self.ReadStructBegin(ctx); err != nil {
- return err
- }
- for {
- _, typeId, _, err := self.ReadFieldBegin(ctx)
- if err != nil {
- return err
- }
- if typeId == STOP {
- break
- }
- if err := Skip(ctx, self, typeId, maxDepth-1); err != nil {
- return err
- }
- if err := self.ReadFieldEnd(ctx); err != nil {
- return err
- }
- }
- return self.ReadStructEnd(ctx)
- case MAP:
- keyType, valueType, size, err := self.ReadMapBegin(ctx)
- if err != nil {
- return err
- }
- for i := 0; i < size; i++ {
- err := Skip(ctx, self, keyType, maxDepth-1)
- if err != nil {
- return err
- }
- // Bound the value skip by the remaining depth budget as well,
- // and propagate its error.
- err = Skip(ctx, self, valueType, maxDepth-1)
- if err != nil {
- return err
- }
- }
- return self.ReadMapEnd(ctx)
- case SET:
- elemType, size, err := self.ReadSetBegin(ctx)
- if err != nil {
- return err
- }
- for i := 0; i < size; i++ {
- err := Skip(ctx, self, elemType, maxDepth-1)
- if err != nil {
- return err
- }
- }
- return self.ReadSetEnd(ctx)
- case LIST:
- elemType, size, err := self.ReadListBegin(ctx)
- if err != nil {
- return err
- }
- for i := 0; i < size; i++ {
- err := Skip(ctx, self, elemType, maxDepth-1)
- if err != nil {
- return err
- }
- }
- return self.ReadListEnd(ctx)
- default:
- return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
- }
- return nil
-}
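
A minimal sketch of the usual call site (prot, ctx, and the enclosing error-returning reader function are assumed in scope), mirroring what generated readers do with unknown fields:

for {
    _, fieldType, fieldID, err := prot.ReadFieldBegin(ctx)
    if err != nil {
        return err
    }
    if fieldType == STOP {
        break
    }
    switch fieldID {
    case 1:
        // ... read a known field ...
    default:
        // Unknown field: skip it with the default recursion budget.
        if err := SkipDefaultDepth(ctx, prot, fieldType); err != nil {
            return err
        }
    }
    if err := prot.ReadFieldEnd(ctx); err != nil {
        return err
    }
}
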
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
deleted file mode 100644
index 9dcf4bfd94..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "encoding/base64"
- "errors"
-)
-
-// Thrift Protocol exception
-type TProtocolException interface {
- TException
- TypeId() int
-}
-
-const (
- UNKNOWN_PROTOCOL_EXCEPTION = 0
- INVALID_DATA = 1
- NEGATIVE_SIZE = 2
- SIZE_LIMIT = 3
- BAD_VERSION = 4
- NOT_IMPLEMENTED = 5
- DEPTH_LIMIT = 6
-)
-
-type tProtocolException struct {
- typeId int
- err error
- msg string
-}
-
-var _ TProtocolException = (*tProtocolException)(nil)
-
-func (tProtocolException) TExceptionType() TExceptionType {
- return TExceptionTypeProtocol
-}
-
-func (p *tProtocolException) TypeId() int {
- return p.typeId
-}
-
-func (p *tProtocolException) String() string {
- return p.msg
-}
-
-func (p *tProtocolException) Error() string {
- return p.msg
-}
-
-func (p *tProtocolException) Unwrap() error {
- return p.err
-}
-
-func NewTProtocolException(err error) TProtocolException {
- if err == nil {
- return nil
- }
-
- if e, ok := err.(TProtocolException); ok {
- return e
- }
-
- if errors.As(err, new(base64.CorruptInputError)) {
- return NewTProtocolExceptionWithType(INVALID_DATA, err)
- }
-
- return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
-}
-
-func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
- if err == nil {
- return nil
- }
- return &tProtocolException{
- typeId: errType,
- err: err,
- msg: err.Error(),
- }
-}
-
-func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
- return &tProtocolException{
- typeId: err.TypeId(),
- err: err,
- msg: prepend + err.Error(),
- }
-}
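
A sketch of classifying these errors at a call site with the standard errors package (err assumed in scope):

var pe TProtocolException
if errors.As(err, &pe) {
    switch pe.TypeId() {
    case SIZE_LIMIT:
        // e.g. reject the oversized input without tearing down everything
    case DEPTH_LIMIT:
        // malformed or malicious nesting
    }
}
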
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
deleted file mode 100644
index c40f796d88..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Factory interface for constructing protocol instances.
-type TProtocolFactory interface {
- GetProtocol(trans TTransport) TProtocol
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
deleted file mode 100644
index 02f0613956..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
-)
-
-// See https://pkg.go.dev/context#WithValue on why we need the unexported typedefs.
-type responseHelperKey struct{}
-
-// TResponseHelper defines an object with a set of helper functions that can be
-// retrieved from the context object passed into server handler functions.
-//
-// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
-// from the context object.
-//
-// The zero value of TResponseHelper is valid with all helper functions being
-// no-ops.
-type TResponseHelper struct {
- // THeader related functions
- *THeaderResponseHelper
-}
-
-// THeaderResponseHelper defines THeader related TResponseHelper functions.
-//
-// The zero value of *THeaderResponseHelper is valid with all helper functions
-// being no-ops.
-type THeaderResponseHelper struct {
- proto *THeaderProtocol
-}
-
-// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
-// underlying TProtocol.
-func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
- if hp, ok := proto.(*THeaderProtocol); ok {
- return &THeaderResponseHelper{
- proto: hp,
- }
- }
- return nil
-}
-
-// SetHeader sets a response header.
-//
-// It's a no-op if the underlying protocol/transport does not support THeader.
-func (h *THeaderResponseHelper) SetHeader(key, value string) {
- if h != nil && h.proto != nil {
- h.proto.SetWriteHeader(key, value)
- }
-}
-
-// ClearHeaders clears all the response headers previously set.
-//
-// It's a no-op if the underlying protocol/transport does not support THeader.
-func (h *THeaderResponseHelper) ClearHeaders() {
- if h != nil && h.proto != nil {
- h.proto.ClearWriteHeaders()
- }
-}
-
-// GetResponseHelper retrieves the TResponseHelper implementation injected into
-// the context object.
-//
-// If no helper was found in the context object, a nop helper with ok == false
-// will be returned.
-func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
- if v := ctx.Value(responseHelperKey{}); v != nil {
- helper, ok = v.(TResponseHelper)
- }
- return
-}
-
-// SetResponseHelper injects TResponseHelper into the context object.
-func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
- return context.WithValue(ctx, responseHelperKey{}, helper)
-}
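
A handler-side sketch (handleCall and its signature are hypothetical); per the doc above, the zero value is safe, so the ok flag can often be ignored:

func handleCall(ctx context.Context) error {
    helper, _ := GetResponseHelper(ctx)
    helper.SetHeader("retry-after", "5") // no-op when THeader is not in use
    return nil
}
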
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
deleted file mode 100644
index 83fdf29f5c..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
- "io"
-)
-
-type RichTransport struct {
- TTransport
-}
-
-// NewTRichTransport wraps a TTransport to provide the TRichTransport interface.
-func NewTRichTransport(trans TTransport) *RichTransport {
- return &RichTransport{trans}
-}
-
-func (r *RichTransport) ReadByte() (c byte, err error) {
- return readByte(r.TTransport)
-}
-
-func (r *RichTransport) WriteByte(c byte) error {
- return writeByte(r.TTransport, c)
-}
-
-func (r *RichTransport) WriteString(s string) (n int, err error) {
- return r.Write([]byte(s))
-}
-
-func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
- return r.TTransport.RemainingBytes()
-}
-
-func readByte(r io.Reader) (c byte, err error) {
- v := [1]byte{0}
- n, err := r.Read(v[0:1])
- if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
- // Got the byte; an accompanying EOF is not an error for a one-byte read.
- return v[0], nil
- }
- if n > 0 && err != nil {
- // Got the byte alongside a real error; return both.
- return v[0], err
- }
- if err != nil {
- return 0, err
- }
- return v[0], nil
-}
-
-func writeByte(w io.Writer, c byte) error {
- v := [1]byte{c}
- _, err := w.Write(v[0:1])
- return err
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
deleted file mode 100644
index c44979094c..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "sync"
-)
-
-type TSerializer struct {
- Transport *TMemoryBuffer
- Protocol TProtocol
-}
-
-type TStruct interface {
- Write(ctx context.Context, p TProtocol) error
- Read(ctx context.Context, p TProtocol) error
-}
-
-func NewTSerializer() *TSerializer {
- transport := NewTMemoryBufferLen(1024)
- protocol := NewTBinaryProtocolTransport(transport)
-
- return &TSerializer{
- Transport: transport,
- Protocol: protocol,
- }
-}
-
-func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
- t.Transport.Reset()
-
- if err = msg.Write(ctx, t.Protocol); err != nil {
- return
- }
-
- if err = t.Protocol.Flush(ctx); err != nil {
- return
- }
- if err = t.Transport.Flush(ctx); err != nil {
- return
- }
-
- return t.Transport.String(), nil
-}
-
-func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
- t.Transport.Reset()
-
- if err = msg.Write(ctx, t.Protocol); err != nil {
- return
- }
-
- if err = t.Protocol.Flush(ctx); err != nil {
- return
- }
-
- if err = t.Transport.Flush(ctx); err != nil {
- return
- }
-
- b = append(b, t.Transport.Bytes()...)
- return
-}
-
-// TSerializerPool is the thread-safe version of TSerializer; it uses a
-// resource pool of TSerializers under the hood.
-//
-// It must be initialized with either NewTSerializerPool or
-// NewTSerializerPoolSizeFactory.
-type TSerializerPool struct {
- pool sync.Pool
-}
-
-// NewTSerializerPool creates a new TSerializerPool.
-//
-// NewTSerializer can be used as the arg here.
-func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
- return &TSerializerPool{
- pool: sync.Pool{
- New: func() interface{} {
- return f()
- },
- },
- }
-}
-
-// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
-// size and protocol factory.
-//
-// Note that the size is not the limit. The TMemoryBuffer underneath can grow
-// larger than that. It just dictates the initial size.
-func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
- return &TSerializerPool{
- pool: sync.Pool{
- New: func() interface{} {
- transport := NewTMemoryBufferLen(size)
- protocol := factory.GetProtocol(transport)
-
- return &TSerializer{
- Transport: transport,
- Protocol: protocol,
- }
- },
- },
- }
-}
-
-func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
- s := t.pool.Get().(*TSerializer)
- defer t.pool.Put(s)
- return s.WriteString(ctx, msg)
-}
-
-func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
- s := t.pool.Get().(*TSerializer)
- defer t.pool.Put(s)
- return s.Write(ctx, msg)
-}
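
A usage sketch (msg stands for any hypothetical generated type implementing TStruct):

pool := NewTSerializerPool(NewTSerializer)
b, err := pool.Write(context.Background(), msg)
if err != nil {
    // handle err
}
_ = b // serialized bytes; pool.Write is safe for concurrent use
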
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
deleted file mode 100644
index 51c40b64a1..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Server transport. Object which provides client transports.
-type TServerTransport interface {
- Listen() error
- Accept() (TTransport, error)
- Close() error
-
- // Optional method implementation. This signals to the server transport
- // that it should break out of any accept() or listen() that it is currently
- // blocked on. This method, if implemented, MUST be thread safe, as it may
- // be called from a different thread context than the other TServerTransport
- // methods.
- Interrupt() error
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
deleted file mode 100644
index d1a8154532..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
+++ /dev/null
@@ -1,1373 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math"
- "strconv"
-)
-
-type _ParseContext int
-
-const (
- _CONTEXT_INVALID _ParseContext = iota
- _CONTEXT_IN_TOPLEVEL // 1
- _CONTEXT_IN_LIST_FIRST // 2
- _CONTEXT_IN_LIST // 3
- _CONTEXT_IN_OBJECT_FIRST // 4
- _CONTEXT_IN_OBJECT_NEXT_KEY // 5
- _CONTEXT_IN_OBJECT_NEXT_VALUE // 6
-)
-
-func (p _ParseContext) String() string {
- switch p {
- case _CONTEXT_IN_TOPLEVEL:
- return "TOPLEVEL"
- case _CONTEXT_IN_LIST_FIRST:
- return "LIST-FIRST"
- case _CONTEXT_IN_LIST:
- return "LIST"
- case _CONTEXT_IN_OBJECT_FIRST:
- return "OBJECT-FIRST"
- case _CONTEXT_IN_OBJECT_NEXT_KEY:
- return "OBJECT-NEXT-KEY"
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- return "OBJECT-NEXT-VALUE"
- }
- return "UNKNOWN-PARSE-CONTEXT"
-}
-
-type jsonContextStack []_ParseContext
-
-func (s *jsonContextStack) push(v _ParseContext) {
- *s = append(*s, v)
-}
-
-func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
- l := len(s)
- if l <= 0 {
- return
- }
- return s[l-1], true
-}
-
-func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
- l := len(*s)
- if l <= 0 {
- return
- }
- v = (*s)[l-1]
- *s = (*s)[0 : l-1]
- return v, true
-}
-
-var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack"))
-
-// Simple JSON protocol implementation for thrift.
-//
-// This protocol produces/consumes a simple output format
-// suitable for parsing by scripting languages. It should not be
-// confused with the full-featured TJSONProtocol.
-//
-type TSimpleJSONProtocol struct {
- trans TTransport
-
- parseContextStack jsonContextStack
- dumpContext jsonContextStack
-
- writer *bufio.Writer
- reader *bufio.Reader
-}
-
-// Constructor
-func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
- v := &TSimpleJSONProtocol{trans: t,
- writer: bufio.NewWriter(t),
- reader: bufio.NewReader(t),
- }
- v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
- v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
- return v
-}
-
-// Factory
-type TSimpleJSONProtocolFactory struct{}
-
-func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
- return NewTSimpleJSONProtocol(trans)
-}
-
-func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
- return &TSimpleJSONProtocolFactory{}
-}
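
A sketch tying this factory to the TSerializerPool defined earlier (msg is again a hypothetical TStruct):

pool := NewTSerializerPoolSizeFactory(1024, NewTSimpleJSONProtocolFactory())
s, err := pool.WriteString(context.Background(), msg)
if err != nil {
    // handle err
}
_ = s // a scripting-friendly JSON rendering of msg, e.g. {"fieldName":42}
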
-
-var (
- JSON_COMMA []byte
- JSON_COLON []byte
- JSON_LBRACE []byte
- JSON_RBRACE []byte
- JSON_LBRACKET []byte
- JSON_RBRACKET []byte
- JSON_QUOTE byte
- JSON_QUOTE_BYTES []byte
- JSON_NULL []byte
- JSON_TRUE []byte
- JSON_FALSE []byte
- JSON_INFINITY string
- JSON_NEGATIVE_INFINITY string
- JSON_NAN string
- JSON_INFINITY_BYTES []byte
- JSON_NEGATIVE_INFINITY_BYTES []byte
- JSON_NAN_BYTES []byte
- json_nonbase_map_elem_bytes []byte
-)
-
-func init() {
- JSON_COMMA = []byte{','}
- JSON_COLON = []byte{':'}
- JSON_LBRACE = []byte{'{'}
- JSON_RBRACE = []byte{'}'}
- JSON_LBRACKET = []byte{'['}
- JSON_RBRACKET = []byte{']'}
- JSON_QUOTE = '"'
- JSON_QUOTE_BYTES = []byte{'"'}
- JSON_NULL = []byte{'n', 'u', 'l', 'l'}
- JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
- JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
- JSON_INFINITY = "Infinity"
- JSON_NEGATIVE_INFINITY = "-Infinity"
- JSON_NAN = "NaN"
- JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
- JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
- JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
- json_nonbase_map_elem_bytes = []byte{']', ',', '['}
-}
-
-func jsonQuote(s string) string {
- b, _ := json.Marshal(s)
- s1 := string(b)
- return s1
-}
-
-func jsonUnquote(s string) (string, bool) {
- s1 := new(string)
- err := json.Unmarshal([]byte(s), s1)
- return *s1, err == nil
-}
-
-func mismatch(expected, actual string) error {
- return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
-}
-
-func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
- p.resetContextStack() // THRIFT-3735
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- if e := p.WriteString(ctx, name); e != nil {
- return e
- }
- if e := p.WriteByte(ctx, int8(typeId)); e != nil {
- return e
- }
- if e := p.WriteI32(ctx, seqId); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
- if e := p.OutputObjectBegin(); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error {
- return p.OutputObjectEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
- if e := p.WriteString(ctx, name); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
-
-func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- if e := p.WriteByte(ctx, int8(keyType)); e != nil {
- return e
- }
- if e := p.WriteByte(ctx, int8(valueType)); e != nil {
- return e
- }
- return p.WriteI32(ctx, int32(size))
-}
-
-func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
- return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
- return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error {
- return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error {
- return p.OutputBool(b)
-}
-
-func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error {
- return p.WriteI32(ctx, int32(b))
-}
-
-func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error {
- return p.WriteI32(ctx, int32(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error {
- return p.OutputI64(int64(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error {
- return p.OutputI64(int64(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
- return p.OutputF64(v)
-}
-
-func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error {
- return p.OutputString(v)
-}
-
-func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
-	// The underlying JSON library only accepts strings, not arbitrary
-	// byte slices. To transmit bytes efficiently as a valid JSON string,
-	// we base64 encode them, which avoids excessive escaping/quoting.
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
- return NewTProtocolException(e)
- }
- writer := base64.NewEncoder(base64.StdEncoding, p.writer)
- if _, e := writer.Write(v); e != nil {
- p.writer.Reset(p.trans) // THRIFT-3735
- return NewTProtocolException(e)
- }
- if e := writer.Close(); e != nil {
- return NewTProtocolException(e)
- }
- if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
- return NewTProtocolException(e)
- }
- return p.OutputPostValue()
-}
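
The wire form this produces is simply the standard base64 text wrapped in JSON quotes; a sketch of the encoding step in isolation:

    package main

    import (
    	"encoding/base64"
    	"fmt"
    )

    func main() {
    	payload := []byte{0xDE, 0xAD, 0xBE, 0xEF}
    	enc := base64.StdEncoding.EncodeToString(payload)
    	fmt.Printf("%q\n", enc) // "3q2+7w==": WriteBinary emits exactly this, quotes included
    }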
-
-// Reading methods.
-func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
- p.resetContextStack() // THRIFT-3735
- if isNull, err := p.ParseListBegin(); isNull || err != nil {
- return name, typeId, seqId, err
- }
- if name, err = p.ReadString(ctx); err != nil {
- return name, typeId, seqId, err
- }
- bTypeId, err := p.ReadByte(ctx)
- typeId = TMessageType(bTypeId)
- if err != nil {
- return name, typeId, seqId, err
- }
- if seqId, err = p.ReadI32(ctx); err != nil {
- return name, typeId, seqId, err
- }
- return name, typeId, seqId, nil
-}
-
-func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
- _, err = p.ParseObjectStart()
- return "", err
-}
-
-func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error {
- return p.ParseObjectEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
- if err := p.ParsePreValue(); err != nil {
- return "", STOP, 0, err
- }
- b, _ := p.reader.Peek(1)
- if len(b) > 0 {
- switch b[0] {
- case JSON_RBRACE[0]:
- return "", STOP, 0, nil
- case JSON_QUOTE:
- p.reader.ReadByte()
- name, err := p.ParseStringBody()
- // simplejson is not meant to be read back into thrift
- // - see http://wiki.apache.org/thrift/ThriftUsageJava
- // - use JSON instead
- if err != nil {
- return name, STOP, 0, err
- }
- return name, STOP, -1, p.ParsePostValue()
- }
- e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
- return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return "", STOP, 0, NewTProtocolException(io.EOF)
-}
-
-func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error {
- return nil
-}
-
-func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
- if isNull, e := p.ParseListBegin(); isNull || e != nil {
- return VOID, VOID, 0, e
- }
-
- // read keyType
- bKeyType, e := p.ReadByte(ctx)
- keyType = TType(bKeyType)
- if e != nil {
- return keyType, valueType, size, e
- }
-
- // read valueType
- bValueType, e := p.ReadByte(ctx)
- valueType = TType(bValueType)
- if e != nil {
- return keyType, valueType, size, e
- }
-
- // read size
- iSize, err := p.ReadI64(ctx)
- size = int(iSize)
- return keyType, valueType, size, err
-}
-
-func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
- return p.ParseElemListBegin()
-}
-
-func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
- return p.ParseElemListBegin()
-}
-
-func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error {
- return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
- var value bool
-
- if err := p.ParsePreValue(); err != nil {
- return value, err
- }
- f, _ := p.reader.Peek(1)
- if len(f) > 0 {
- switch f[0] {
- case JSON_TRUE[0]:
- b := make([]byte, len(JSON_TRUE))
- _, err := p.reader.Read(b)
- if err != nil {
- return false, NewTProtocolException(err)
- }
- if string(b) == string(JSON_TRUE) {
- value = true
- } else {
- e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
- return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- case JSON_FALSE[0]:
- b := make([]byte, len(JSON_FALSE))
- _, err := p.reader.Read(b)
- if err != nil {
- return false, NewTProtocolException(err)
- }
- if string(b) == string(JSON_FALSE) {
- value = false
- } else {
- e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
- return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- case JSON_NULL[0]:
- b := make([]byte, len(JSON_NULL))
- _, err := p.reader.Read(b)
- if err != nil {
- return false, NewTProtocolException(err)
- }
- if string(b) == string(JSON_NULL) {
- value = false
- } else {
- e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
- return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- default:
- e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
- return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- return value, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
- v, err := p.ReadI64(ctx)
- return int8(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
- v, err := p.ReadI64(ctx)
- return int16(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
- v, err := p.ReadI64(ctx)
- return int32(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
- v, _, err := p.ParseI64()
- return v, err
-}
-
-func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
- v, _, err := p.ParseF64()
- return v, err
-}
-
-func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) {
- var v string
- if err := p.ParsePreValue(); err != nil {
- return v, err
- }
- f, _ := p.reader.Peek(1)
- if len(f) > 0 && f[0] == JSON_QUOTE {
- p.reader.ReadByte()
- value, err := p.ParseStringBody()
- v = value
- if err != nil {
- return v, err
- }
- } else if len(f) > 0 && f[0] == JSON_NULL[0] {
- b := make([]byte, len(JSON_NULL))
- _, err := p.reader.Read(b)
- if err != nil {
- return v, NewTProtocolException(err)
- }
- if string(b) != string(JSON_NULL) {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- } else {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return v, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
- var v []byte
- if err := p.ParsePreValue(); err != nil {
- return nil, err
- }
- f, _ := p.reader.Peek(1)
- if len(f) > 0 && f[0] == JSON_QUOTE {
- p.reader.ReadByte()
- value, err := p.ParseBase64EncodedBody()
- v = value
- if err != nil {
- return v, err
- }
- } else if len(f) > 0 && f[0] == JSON_NULL[0] {
- b := make([]byte, len(JSON_NULL))
- _, err := p.reader.Read(b)
- if err != nil {
- return v, NewTProtocolException(err)
- }
- if string(b) != string(JSON_NULL) {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- } else {
- e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
- return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
-
- return v, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
- return NewTProtocolException(p.writer.Flush())
-}
-
-func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
- return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TSimpleJSONProtocol) Transport() TTransport {
- return p.trans
-}
-
-func (p *TSimpleJSONProtocol) OutputPreValue() error {
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- switch cxt {
- case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- if _, e := p.write(JSON_COMMA); e != nil {
- return NewTProtocolException(e)
- }
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- if _, e := p.write(JSON_COLON); e != nil {
- return NewTProtocolException(e)
- }
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputPostValue() error {
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- switch cxt {
- case _CONTEXT_IN_LIST_FIRST:
- p.dumpContext.pop()
- p.dumpContext.push(_CONTEXT_IN_LIST)
- case _CONTEXT_IN_OBJECT_FIRST:
- p.dumpContext.pop()
- p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
- case _CONTEXT_IN_OBJECT_NEXT_KEY:
- p.dumpContext.pop()
- p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- p.dumpContext.pop()
- p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- var v string
- if value {
- v = string(JSON_TRUE)
- } else {
- v = string(JSON_FALSE)
- }
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- switch cxt {
- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- v = jsonQuote(v)
- }
- if e := p.OutputStringData(v); e != nil {
- return e
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputNull() error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_NULL); e != nil {
- return NewTProtocolException(e)
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- var v string
- if math.IsNaN(value) {
- v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
- } else if math.IsInf(value, 1) {
- v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
- } else if math.IsInf(value, -1) {
- v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
- } else {
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- v = strconv.FormatFloat(value, 'g', -1, 64)
- switch cxt {
- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
- }
- }
- if e := p.OutputStringData(v); e != nil {
- return e
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- cxt, ok := p.dumpContext.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- v := strconv.FormatInt(value, 10)
- switch cxt {
- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- v = jsonQuote(v)
- }
- if e := p.OutputStringData(v); e != nil {
- return e
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputString(s string) error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if e := p.OutputStringData(jsonQuote(s)); e != nil {
- return e
- }
- return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
- _, e := p.write([]byte(s))
- return NewTProtocolException(e)
-}
-
-func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_LBRACE); e != nil {
- return NewTProtocolException(e)
- }
- p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST)
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
- if _, e := p.write(JSON_RBRACE); e != nil {
- return NewTProtocolException(e)
- }
- _, ok := p.dumpContext.pop()
- if !ok {
- return errEmptyJSONContextStack
- }
- if e := p.OutputPostValue(); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputListBegin() error {
- if e := p.OutputPreValue(); e != nil {
- return e
- }
- if _, e := p.write(JSON_LBRACKET); e != nil {
- return NewTProtocolException(e)
- }
- p.dumpContext.push(_CONTEXT_IN_LIST_FIRST)
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputListEnd() error {
- if _, e := p.write(JSON_RBRACKET); e != nil {
- return NewTProtocolException(e)
- }
- _, ok := p.dumpContext.pop()
- if !ok {
- return errEmptyJSONContextStack
- }
- if e := p.OutputPostValue(); e != nil {
- return e
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
- if e := p.OutputListBegin(); e != nil {
- return e
- }
- if e := p.OutputI64(int64(elemType)); e != nil {
- return e
- }
- if e := p.OutputI64(int64(size)); e != nil {
- return e
- }
- return nil
-}
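
OutputElemListBegin establishes the element-list wire format [elemType, size, elem, ...]. A hedged sketch (TMemoryBuffer and the vendored import path assumed) showing what a two-element i64 list serializes to:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    func main() {
    	buf := thrift.NewTMemoryBuffer()
    	p := thrift.NewTSimpleJSONProtocol(buf)
    	ctx := context.Background()
    	_ = p.WriteListBegin(ctx, thrift.I64, 2) // element type I64 == 10
    	_ = p.WriteI64(ctx, 7)
    	_ = p.WriteI64(ctx, 8)
    	_ = p.WriteListEnd(ctx)
    	_ = p.Flush(ctx)
    	fmt.Println(buf.String()) // [10,2,7,8]
    }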
-
-func (p *TSimpleJSONProtocol) ParsePreValue() error {
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- cxt, ok := p.parseContextStack.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- b, _ := p.reader.Peek(1)
- switch cxt {
- case _CONTEXT_IN_LIST:
- if len(b) > 0 {
- switch b[0] {
- case JSON_RBRACKET[0]:
- return nil
- case JSON_COMMA[0]:
- p.reader.ReadByte()
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- return nil
- default:
- e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- case _CONTEXT_IN_OBJECT_NEXT_KEY:
- if len(b) > 0 {
- switch b[0] {
- case JSON_RBRACE[0]:
- return nil
- case JSON_COMMA[0]:
- p.reader.ReadByte()
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- return nil
- default:
- e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- if len(b) > 0 {
- switch b[0] {
- case JSON_COLON[0]:
- p.reader.ReadByte()
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- return nil
- default:
- e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) ParsePostValue() error {
- if e := p.readNonSignificantWhitespace(); e != nil {
- return NewTProtocolException(e)
- }
- cxt, ok := p.parseContextStack.peek()
- if !ok {
- return errEmptyJSONContextStack
- }
- switch cxt {
- case _CONTEXT_IN_LIST_FIRST:
- p.parseContextStack.pop()
- p.parseContextStack.push(_CONTEXT_IN_LIST)
- case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
- p.parseContextStack.pop()
- p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
- case _CONTEXT_IN_OBJECT_NEXT_VALUE:
- p.parseContextStack.pop()
- p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
- for {
- b, _ := p.reader.Peek(1)
- if len(b) < 1 {
- return nil
- }
- switch b[0] {
- case ' ', '\r', '\n', '\t':
- p.reader.ReadByte()
- continue
- }
- break
- }
- return nil
-}
-
-func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
- line, err := p.reader.ReadString(JSON_QUOTE)
- if err != nil {
- return "", NewTProtocolException(err)
- }
- l := len(line)
- // count number of escapes to see if we need to keep going
- i := 1
- for ; i < l; i++ {
- if line[l-i-1] != '\\' {
- break
- }
- }
- if i&0x01 == 1 {
- v, ok := jsonUnquote(string(JSON_QUOTE) + line)
- if !ok {
- return "", NewTProtocolException(err)
- }
- return v, nil
- }
- s, err := p.ParseQuotedStringBody()
- if err != nil {
- return "", NewTProtocolException(err)
- }
- str := string(JSON_QUOTE) + line + s
- v, ok := jsonUnquote(str)
- if !ok {
- e := fmt.Errorf("Unable to parse as JSON string %s", str)
- return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return v, nil
-}
-
-func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
- line, err := p.reader.ReadString(JSON_QUOTE)
- if err != nil {
- return "", NewTProtocolException(err)
- }
- l := len(line)
- // count number of escapes to see if we need to keep going
- i := 1
- for ; i < l; i++ {
- if line[l-i-1] != '\\' {
- break
- }
- }
- if i&0x01 == 1 {
- return line, nil
- }
- s, err := p.ParseQuotedStringBody()
- if err != nil {
- return "", NewTProtocolException(err)
- }
- v := line + s
- return v, nil
-}
-
-func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
- line, err := p.reader.ReadBytes(JSON_QUOTE)
- if err != nil {
- return line, NewTProtocolException(err)
- }
- line2 := line[0 : len(line)-1]
- l := len(line2)
- if (l % 4) != 0 {
- pad := 4 - (l % 4)
- fill := [...]byte{'=', '=', '='}
- line2 = append(line2, fill[:pad]...)
- l = len(line2)
- }
- output := make([]byte, base64.StdEncoding.DecodedLen(l))
- n, err := base64.StdEncoding.Decode(output, line2)
- return output[0:n], NewTProtocolException(err)
-}
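
The padding branch above tolerates unpadded base64 by appending '=' until the length is a multiple of four; the same repair in isolation:

    package main

    import (
    	"bytes"
    	"encoding/base64"
    	"fmt"
    )

    func main() {
    	in := []byte("3q2+7w") // unpadded; length 6
    	if m := len(in) % 4; m != 0 {
    		in = append(in, bytes.Repeat([]byte{'='}, 4-m)...) // -> "3q2+7w=="
    	}
    	out := make([]byte, base64.StdEncoding.DecodedLen(len(in)))
    	n, err := base64.StdEncoding.Decode(out, in)
    	fmt.Printf("% x %v\n", out[:n], err) // de ad be ef <nil>
    }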
-
-func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
- if err := p.ParsePreValue(); err != nil {
- return 0, false, err
- }
- var value int64
- var isnull bool
- if p.safePeekContains(JSON_NULL) {
- p.reader.Read(make([]byte, len(JSON_NULL)))
- isnull = true
- } else {
- num, err := p.readNumeric()
- isnull = (num == nil)
- if !isnull {
- value = num.Int64()
- }
- if err != nil {
- return value, isnull, err
- }
- }
- return value, isnull, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
- if err := p.ParsePreValue(); err != nil {
- return 0, false, err
- }
- var value float64
- var isnull bool
- if p.safePeekContains(JSON_NULL) {
- p.reader.Read(make([]byte, len(JSON_NULL)))
- isnull = true
- } else {
- num, err := p.readNumeric()
- isnull = (num == nil)
- if !isnull {
- value = num.Float64()
- }
- if err != nil {
- return value, isnull, err
- }
- }
- return value, isnull, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
- if err := p.ParsePreValue(); err != nil {
- return false, err
- }
- var b []byte
- b, err := p.reader.Peek(1)
- if err != nil {
- return false, err
- }
- if len(b) > 0 && b[0] == JSON_LBRACE[0] {
- p.reader.ReadByte()
- p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST)
- return false, nil
- } else if p.safePeekContains(JSON_NULL) {
- return true, nil
- }
- e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
- return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
-}
-
-func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
- if isNull, err := p.readIfNull(); isNull || err != nil {
- return err
- }
- cxt, _ := p.parseContextStack.peek()
- if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
- e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- line, err := p.reader.ReadString(JSON_RBRACE[0])
- if err != nil {
- return NewTProtocolException(err)
- }
- for _, char := range line {
- switch char {
- default:
- e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- case ' ', '\n', '\r', '\t', '}':
- break
- }
- }
- p.parseContextStack.pop()
- return p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
- if e := p.ParsePreValue(); e != nil {
- return false, e
- }
- var b []byte
- b, err = p.reader.Peek(1)
- if err != nil {
- return false, err
- }
- if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
- p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST)
- p.reader.ReadByte()
- isNull = false
- } else if p.safePeekContains(JSON_NULL) {
- isNull = true
- } else {
- err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
- }
- return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
-}
-
-func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
- if isNull, e := p.ParseListBegin(); isNull || e != nil {
- return VOID, 0, e
- }
- bElemType, _, err := p.ParseI64()
- elemType = TType(bElemType)
- if err != nil {
- return elemType, size, err
- }
- nSize, _, err2 := p.ParseI64()
- size = int(nSize)
- return elemType, size, err2
-}
-
-func (p *TSimpleJSONProtocol) ParseListEnd() error {
- if isNull, err := p.readIfNull(); isNull || err != nil {
- return err
- }
- cxt, _ := p.parseContextStack.peek()
- if cxt != _CONTEXT_IN_LIST {
- e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- line, err := p.reader.ReadString(JSON_RBRACKET[0])
- if err != nil {
- return NewTProtocolException(err)
- }
- for _, char := range line {
- switch char {
- default:
- e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
- return NewTProtocolExceptionWithType(INVALID_DATA, e)
- case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
- break
- }
- }
- p.parseContextStack.pop()
- if cxt, ok := p.parseContextStack.peek(); !ok {
- return errEmptyJSONContextStack
- } else if cxt == _CONTEXT_IN_TOPLEVEL {
- return nil
- }
- return p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
- e := p.readNonSignificantWhitespace()
- if e != nil {
- return nil, VOID, NewTProtocolException(e)
- }
- b, e := p.reader.Peek(1)
- if len(b) > 0 {
- c := b[0]
- switch c {
- case JSON_NULL[0]:
- buf := make([]byte, len(JSON_NULL))
- _, e := p.reader.Read(buf)
- if e != nil {
- return nil, VOID, NewTProtocolException(e)
- }
- if string(JSON_NULL) != string(buf) {
- e = mismatch(string(JSON_NULL), string(buf))
- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return nil, VOID, nil
- case JSON_QUOTE:
- p.reader.ReadByte()
- v, e := p.ParseStringBody()
- if e != nil {
- return v, UTF8, NewTProtocolException(e)
- }
- if v == JSON_INFINITY {
- return INFINITY, DOUBLE, nil
- } else if v == JSON_NEGATIVE_INFINITY {
- return NEGATIVE_INFINITY, DOUBLE, nil
- } else if v == JSON_NAN {
- return NAN, DOUBLE, nil
- }
- return v, UTF8, nil
- case JSON_TRUE[0]:
- buf := make([]byte, len(JSON_TRUE))
- _, e := p.reader.Read(buf)
- if e != nil {
- return true, BOOL, NewTProtocolException(e)
- }
- if string(JSON_TRUE) != string(buf) {
- e := mismatch(string(JSON_TRUE), string(buf))
- return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return true, BOOL, nil
- case JSON_FALSE[0]:
- buf := make([]byte, len(JSON_FALSE))
- _, e := p.reader.Read(buf)
- if e != nil {
- return false, BOOL, NewTProtocolException(e)
- }
- if string(JSON_FALSE) != string(buf) {
- e := mismatch(string(JSON_FALSE), string(buf))
- return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return false, BOOL, nil
- case JSON_LBRACKET[0]:
- _, e := p.reader.ReadByte()
- return make([]interface{}, 0), LIST, NewTProtocolException(e)
- case JSON_LBRACE[0]:
- _, e := p.reader.ReadByte()
- return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
- // assume numeric
- v, e := p.readNumeric()
- return v, DOUBLE, e
- default:
- e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- e = fmt.Errorf("Cannot read a single element while parsing JSON.")
- return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
-
-}
-
-func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
- cont := true
- for cont {
- b, _ := p.reader.Peek(1)
- if len(b) < 1 {
- return false, nil
- }
- switch b[0] {
- default:
- return false, nil
- case JSON_NULL[0]:
- cont = false
- break
- case ' ', '\n', '\r', '\t':
- p.reader.ReadByte()
- break
- }
- }
- if p.safePeekContains(JSON_NULL) {
- p.reader.Read(make([]byte, len(JSON_NULL)))
- return true, nil
- }
- return false, nil
-}
-
-func (p *TSimpleJSONProtocol) readQuoteIfNext() {
- b, _ := p.reader.Peek(1)
- if len(b) > 0 && b[0] == JSON_QUOTE {
- p.reader.ReadByte()
- }
-}
-
-func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
- isNull, err := p.readIfNull()
- if isNull || err != nil {
- return NUMERIC_NULL, err
- }
- hasDecimalPoint := false
- nextCanBeSign := true
- hasE := false
- MAX_LEN := 40
- buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
- continueFor := true
- inQuotes := false
- for continueFor {
- c, err := p.reader.ReadByte()
- if err != nil {
- if err == io.EOF {
- break
- }
- return NUMERIC_NULL, NewTProtocolException(err)
- }
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- buf.WriteByte(c)
- nextCanBeSign = false
- case '.':
- if hasDecimalPoint {
- e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- if hasE {
- e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- buf.WriteByte(c)
- hasDecimalPoint, nextCanBeSign = true, false
- case 'e', 'E':
- if hasE {
- e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- buf.WriteByte(c)
- hasE, nextCanBeSign = true, true
- case '-', '+':
- if !nextCanBeSign {
- e := fmt.Errorf("Negative sign within number")
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- buf.WriteByte(c)
- nextCanBeSign = false
- case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
- p.reader.UnreadByte()
- continueFor = false
- case JSON_NAN[0]:
- if buf.Len() == 0 {
- buffer := make([]byte, len(JSON_NAN))
- buffer[0] = c
- _, e := p.reader.Read(buffer[1:])
- if e != nil {
- return NUMERIC_NULL, NewTProtocolException(e)
- }
- if JSON_NAN != string(buffer) {
- e := mismatch(JSON_NAN, string(buffer))
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- if inQuotes {
- p.readQuoteIfNext()
- }
- return NAN, nil
- } else {
- e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- case JSON_INFINITY[0]:
- if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
- buffer := make([]byte, len(JSON_INFINITY))
- buffer[0] = c
- _, e := p.reader.Read(buffer[1:])
- if e != nil {
- return NUMERIC_NULL, NewTProtocolException(e)
- }
- if JSON_INFINITY != string(buffer) {
- e := mismatch(JSON_INFINITY, string(buffer))
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- if inQuotes {
- p.readQuoteIfNext()
- }
- return INFINITY, nil
- } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
- buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
- buffer[0] = JSON_NEGATIVE_INFINITY[0]
- buffer[1] = c
- _, e := p.reader.Read(buffer[2:])
- if e != nil {
- return NUMERIC_NULL, NewTProtocolException(e)
- }
- if JSON_NEGATIVE_INFINITY != string(buffer) {
- e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- if inQuotes {
- p.readQuoteIfNext()
- }
- return NEGATIVE_INFINITY, nil
- } else {
- e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- case JSON_QUOTE:
- if !inQuotes {
- inQuotes = true
- } else {
- break
- }
- default:
- e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- }
- if buf.Len() == 0 {
- e := fmt.Errorf("Unable to parse number from empty string ''")
- return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
- }
- return NewNumericFromJSONString(buf.String(), false), nil
-}
-
-// Safely peeks into the buffer, reading only what is necessary
-func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
- for i := 0; i < len(b); i++ {
- a, _ := p.reader.Peek(i + 1)
- if len(a) < (i+1) || a[i] != b[i] {
- return false
- }
- }
- return true
-}
-
-// Reset the context stack to its initial state.
-func (p *TSimpleJSONProtocol) resetContextStack() {
- p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
- p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
-}
-
-func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
- n, err := p.writer.Write(b)
- if err != nil {
- p.writer.Reset(p.trans) // THRIFT-3735
- }
- return n, err
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) {
- PropagateTConfiguration(p.trans, conf)
-}
-
-var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil)
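
Because JSON has no literals for NaN or the infinities, OutputF64 quotes them as strings and readNumeric accepts the quoted forms back. A round-trip sketch (TMemoryBuffer and the vendored import path assumed):

    package main

    import (
    	"context"
    	"fmt"
    	"math"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    func main() {
    	buf := thrift.NewTMemoryBuffer()
    	p := thrift.NewTSimpleJSONProtocol(buf)
    	ctx := context.Background()
    	_ = p.WriteDouble(ctx, math.NaN())
    	_ = p.Flush(ctx)
    	fmt.Println(buf.String() == `"NaN"`) // true: quoted, since bare NaN is invalid JSON
    	v, _ := p.ReadDouble(ctx)            // readNumeric recognizes the quoted form
    	fmt.Println(math.IsNaN(v))           // true
    }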
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
deleted file mode 100644
index 563cbfc694..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
- "fmt"
- "io"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// ErrAbandonRequest is a special error server handler implementations can
-// return to indicate that the request has been abandoned.
-//
-// TSimpleServer will check for this error, and close the client connection
-// instead of writing the response/error back to the client.
-//
-// It shall only be used when the server handler implementation knows that the
-// client has already abandoned the request (by checking that the passed-in
-// context is already canceled, for example).
-var ErrAbandonRequest = errors.New("request abandoned")
-
-// ServerConnectivityCheckInterval defines the ticker interval used by the
-// connectivity check in thrift-compiled TProcessorFunc implementations.
-//
-// It's defined as a variable instead of a constant so that thrift server
-// implementations can change its value to control the behavior.
-//
-// If it's set to a value <= 0, the feature is disabled.
-var ServerConnectivityCheckInterval = time.Millisecond * 5
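
A sketch of tuning the interval; as a package-level variable it should be set before any server starts processing:

    package main

    import "github.com/uber/jaeger-client-go/thrift" // assumed vendored import path

    func init() {
    	// Values <= 0 disable the connectivity check feature entirely.
    	thrift.ServerConnectivityCheckInterval = 0
    }

    func main() {}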
-
-/*
- * This is not a typical TSimpleServer as it is not blocked after accepting a socket.
- * It is more like a TThreadedServer that can handle different connections in different goroutines.
- * This works well when the Go client implements connection pooling on its side.
- */
-type TSimpleServer struct {
- closed int32
- wg sync.WaitGroup
- mu sync.Mutex
-
- processorFactory TProcessorFactory
- serverTransport TServerTransport
- inputTransportFactory TTransportFactory
- outputTransportFactory TTransportFactory
- inputProtocolFactory TProtocolFactory
- outputProtocolFactory TProtocolFactory
-
- // Headers to auto forward in THeaderProtocol
- forwardHeaders []string
-
- logger Logger
-}
-
-func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
- return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
-}
-
-func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
- return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
- serverTransport,
- transportFactory,
- protocolFactory,
- )
-}
-
-func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
- return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
- serverTransport,
- inputTransportFactory,
- outputTransportFactory,
- inputProtocolFactory,
- outputProtocolFactory,
- )
-}
-
-func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
- return NewTSimpleServerFactory6(processorFactory,
- serverTransport,
- NewTTransportFactory(),
- NewTTransportFactory(),
- NewTBinaryProtocolFactoryDefault(),
- NewTBinaryProtocolFactoryDefault(),
- )
-}
-
-func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
- return NewTSimpleServerFactory6(processorFactory,
- serverTransport,
- transportFactory,
- transportFactory,
- protocolFactory,
- protocolFactory,
- )
-}
-
-func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
- return &TSimpleServer{
- processorFactory: processorFactory,
- serverTransport: serverTransport,
- inputTransportFactory: inputTransportFactory,
- outputTransportFactory: outputTransportFactory,
- inputProtocolFactory: inputProtocolFactory,
- outputProtocolFactory: outputProtocolFactory,
- }
-}
-
-func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
- return p.processorFactory
-}
-
-func (p *TSimpleServer) ServerTransport() TServerTransport {
- return p.serverTransport
-}
-
-func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
- return p.inputTransportFactory
-}
-
-func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
- return p.outputTransportFactory
-}
-
-func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
- return p.inputProtocolFactory
-}
-
-func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
- return p.outputProtocolFactory
-}
-
-func (p *TSimpleServer) Listen() error {
- return p.serverTransport.Listen()
-}
-
-// SetForwardHeaders sets the list of header keys that will be auto forwarded
-// while using THeaderProtocol.
-//
-// "forward" means that when the server is also a client to other upstream
-// thrift servers, the context object user gets in the processor functions will
-// have both read and write headers set, with write headers being forwarded.
-// Users can always override the write headers by calling SetWriteHeaderList
-// before calling thrift client functions.
-func (p *TSimpleServer) SetForwardHeaders(headers []string) {
- size := len(headers)
- if size == 0 {
- p.forwardHeaders = nil
- return
- }
-
- keys := make([]string, size)
- copy(keys, headers)
- p.forwardHeaders = keys
-}
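
A usage sketch; the header names below are hypothetical placeholders, not keys defined by this library:

    package main

    import "github.com/uber/jaeger-client-go/thrift" // assumed vendored import path

    // configureForwarding shows SetForwardHeaders usage; "x-request-id" and
    // "x-tenant" are hypothetical example names.
    func configureForwarding(s *thrift.TSimpleServer) {
    	s.SetForwardHeaders([]string{"x-request-id", "x-tenant"})
    	// A nil or empty slice clears the forwarding list:
    	// s.SetForwardHeaders(nil)
    }

    func main() {}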
-
-// SetLogger sets the logger used by this TSimpleServer.
-//
-// If no logger was set before Serve is called, a default logger using the
-// standard library's log package will be used.
-func (p *TSimpleServer) SetLogger(logger Logger) {
- p.logger = logger
-}
-
-func (p *TSimpleServer) innerAccept() (int32, error) {
- client, err := p.serverTransport.Accept()
- p.mu.Lock()
- defer p.mu.Unlock()
- closed := atomic.LoadInt32(&p.closed)
- if closed != 0 {
- return closed, nil
- }
- if err != nil {
- return 0, err
- }
- if client != nil {
- p.wg.Add(1)
- go func() {
- defer p.wg.Done()
- if err := p.processRequests(client); err != nil {
- p.logger(fmt.Sprintf("error processing request: %v", err))
- }
- }()
- }
- return 0, nil
-}
-
-func (p *TSimpleServer) AcceptLoop() error {
- for {
- closed, err := p.innerAccept()
- if err != nil {
- return err
- }
- if closed != 0 {
- return nil
- }
- }
-}
-
-func (p *TSimpleServer) Serve() error {
- p.logger = fallbackLogger(p.logger)
-
- err := p.Listen()
- if err != nil {
- return err
- }
- p.AcceptLoop()
- return nil
-}
-
-func (p *TSimpleServer) Stop() error {
- p.mu.Lock()
- defer p.mu.Unlock()
- if atomic.LoadInt32(&p.closed) != 0 {
- return nil
- }
- atomic.StoreInt32(&p.closed, 1)
- p.serverTransport.Interrupt()
- p.wg.Wait()
- return nil
-}
-
-// If err is actually EOF, return nil, otherwise return err as-is.
-func treatEOFErrorsAsNil(err error) error {
- if err == nil {
- return nil
- }
- if errors.Is(err, io.EOF) {
- return nil
- }
- var te TTransportException
- if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
- return nil
- }
- return err
-}
-
-func (p *TSimpleServer) processRequests(client TTransport) (err error) {
- defer func() {
- err = treatEOFErrorsAsNil(err)
- }()
-
- processor := p.processorFactory.GetProcessor(client)
- inputTransport, err := p.inputTransportFactory.GetTransport(client)
- if err != nil {
- return err
- }
- inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
- var outputTransport TTransport
- var outputProtocol TProtocol
-
- // for THeaderProtocol, we must use the same protocol instance for
- // input and output so that the response is in the same dialect that
- // the server detected the request was in.
- headerProtocol, ok := inputProtocol.(*THeaderProtocol)
- if ok {
- outputProtocol = inputProtocol
- } else {
- oTrans, err := p.outputTransportFactory.GetTransport(client)
- if err != nil {
- return err
- }
- outputTransport = oTrans
- outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
- }
-
- if inputTransport != nil {
- defer inputTransport.Close()
- }
- if outputTransport != nil {
- defer outputTransport.Close()
- }
- for {
- if atomic.LoadInt32(&p.closed) != 0 {
- return nil
- }
-
- ctx := SetResponseHelper(
- defaultCtx,
- TResponseHelper{
- THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
- },
- )
- if headerProtocol != nil {
- // We need to call ReadFrame here, otherwise we won't
- // get any headers on the AddReadTHeaderToContext call.
- //
- // ReadFrame is safe to be called multiple times so it
- // won't break when it's called again later when we
- // actually start to read the message.
- if err := headerProtocol.ReadFrame(ctx); err != nil {
- return err
- }
- ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
- ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
- }
-
- ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
- if errors.Is(err, ErrAbandonRequest) {
- return client.Close()
- }
- if errors.As(err, new(TTransportException)) && err != nil {
- return err
- }
- var tae TApplicationException
- if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
- continue
- }
- if !ok {
- break
- }
- }
- return nil
-}
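
Putting the server pieces together, a hedged lifecycle sketch. The processor and server transport are assumptions (generated TProcessor code and a TServerTransport such as the full Apache Thrift library's TServerSocket; this trimmed vendored copy does not ship one), and Stop is safe to call from another goroutine:

    package main

    import (
    	"fmt"
    	"os"
    	"os/signal"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    // serve runs a TSimpleServer until the process receives an interrupt.
    func serve(processor thrift.TProcessor, transport thrift.TServerTransport) error {
    	server := thrift.NewTSimpleServer2(processor, transport)
    	server.SetLogger(func(msg string) { fmt.Fprintln(os.Stderr, "thrift:", msg) })

    	go func() {
    		ch := make(chan os.Signal, 1)
    		signal.Notify(ch, os.Interrupt)
    		<-ch
    		server.Stop() // interrupts the accept loop and waits for in-flight requests
    	}()
    	return server.Serve() // Listen, then block in AcceptLoop
    }

    func main() {}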
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
deleted file mode 100644
index ba2738a8df..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "context"
- "errors"
- "io"
-)
-
-var errTransportInterrupted = errors.New("Transport Interrupted")
-
-type Flusher interface {
- Flush() (err error)
-}
-
-type ContextFlusher interface {
- Flush(ctx context.Context) (err error)
-}
-
-type ReadSizeProvider interface {
- RemainingBytes() (num_bytes uint64)
-}
-
-// Encapsulates the I/O layer
-type TTransport interface {
- io.ReadWriteCloser
- ContextFlusher
- ReadSizeProvider
-
- // Opens the transport for communication
- Open() error
-
- // Returns true if the transport is open
- IsOpen() bool
-}
-
-type stringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-// This is "enchanced" transport with extra capabilities. You need to use one of these
-// to construct protocol.
-// Notably, TSocket does not implement this interface, and it is always a mistake to use
-// TSocket directly in protocol.
-type TRichTransport interface {
- io.ReadWriter
- io.ByteReader
- io.ByteWriter
- stringWriter
- ContextFlusher
- ReadSizeProvider
-}
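
A sketch of these interfaces in use, assuming the package's TMemoryBuffer (which embeds bytes.Buffer and so also satisfies TRichTransport):

    package main

    import (
    	"fmt"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    // Compile-time checks (assumption: TMemoryBuffer embeds bytes.Buffer, which
    // supplies ReadByte/WriteByte/WriteString).
    var (
    	_ thrift.TTransport     = (*thrift.TMemoryBuffer)(nil)
    	_ thrift.TRichTransport = (*thrift.TMemoryBuffer)(nil)
    )

    func main() {
    	var t thrift.TTransport = thrift.NewTMemoryBuffer()
    	if !t.IsOpen() {
    		_ = t.Open()
    	}
    	_, _ = t.Write([]byte("payload"))
    	fmt.Println(t.RemainingBytes()) // 7
    }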
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
deleted file mode 100644
index 0a3f07646d..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
- "errors"
- "io"
-)
-
-type timeoutable interface {
- Timeout() bool
-}
-
-// Thrift Transport exception
-type TTransportException interface {
- TException
- TypeId() int
- Err() error
-}
-
-const (
- UNKNOWN_TRANSPORT_EXCEPTION = 0
- NOT_OPEN = 1
- ALREADY_OPEN = 2
- TIMED_OUT = 3
- END_OF_FILE = 4
-)
-
-type tTransportException struct {
- typeId int
- err error
- msg string
-}
-
-var _ TTransportException = (*tTransportException)(nil)
-
-func (tTransportException) TExceptionType() TExceptionType {
- return TExceptionTypeTransport
-}
-
-func (p *tTransportException) TypeId() int {
- return p.typeId
-}
-
-func (p *tTransportException) Error() string {
- return p.msg
-}
-
-func (p *tTransportException) Err() error {
- return p.err
-}
-
-func (p *tTransportException) Unwrap() error {
- return p.err
-}
-
-func (p *tTransportException) Timeout() bool {
- return p.typeId == TIMED_OUT
-}
-
-func NewTTransportException(t int, e string) TTransportException {
- return &tTransportException{
- typeId: t,
- err: errors.New(e),
- msg: e,
- }
-}
-
-func NewTTransportExceptionFromError(e error) TTransportException {
- if e == nil {
- return nil
- }
-
- if t, ok := e.(TTransportException); ok {
- return t
- }
-
- te := &tTransportException{
- typeId: UNKNOWN_TRANSPORT_EXCEPTION,
- err: e,
- msg: e.Error(),
- }
-
- if isTimeoutError(e) {
- te.typeId = TIMED_OUT
- return te
- }
-
- if errors.Is(e, io.EOF) {
- te.typeId = END_OF_FILE
- return te
- }
-
- return te
-}
-
-func prependTTransportException(prepend string, e TTransportException) TTransportException {
- return &tTransportException{
- typeId: e.TypeId(),
- err: e,
- msg: prepend + e.Error(),
- }
-}
-
-// isTimeoutError returns true when err is an error caused by timeout.
-//
-// Note that this also includes TTransportException wrapped timeout errors.
-func isTimeoutError(err error) bool {
- var t timeoutable
- if errors.As(err, &t) {
- return t.Timeout()
- }
- return false
-}
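
A classification sketch: a wrapped io.EOF maps to END_OF_FILE, and any error exposing Timeout() bool maps to TIMED_OUT. fakeTimeout below is a stand-in defined purely for the example:

    package main

    import (
    	"fmt"
    	"io"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    type fakeTimeout struct{}

    func (fakeTimeout) Error() string { return "deadline exceeded" }
    func (fakeTimeout) Timeout() bool { return true }

    func main() {
    	eof := thrift.NewTTransportExceptionFromError(fmt.Errorf("read: %w", io.EOF))
    	fmt.Println(eof.TypeId() == thrift.END_OF_FILE) // true, via errors.Is unwrapping

    	to := thrift.NewTTransportExceptionFromError(fakeTimeout{})
    	fmt.Println(to.TypeId() == thrift.TIMED_OUT) // true, via the timeoutable check
    }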
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
deleted file mode 100644
index c805807940..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// TTransportFactory is used to create wrapped instances of Transports.
-// It is used primarily in servers, which get Transports from
-// a ServerTransport and then may want to mutate them (e.g., create
-// a BufferedTransport from the underlying base transport).
-type TTransportFactory interface {
- GetTransport(trans TTransport) (TTransport, error)
-}
-
-type tTransportFactory struct{}
-
-// Return a wrapped instance of the base Transport.
-func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
- return trans, nil
-}
-
-func NewTTransportFactory() TTransportFactory {
- return &tTransportFactory{}
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
deleted file mode 100644
index 4292ffcadb..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/thrift/type.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Type constants in the Thrift protocol
-type TType byte
-
-const (
- STOP = 0
- VOID = 1
- BOOL = 2
- BYTE = 3
- I08 = 3
- DOUBLE = 4
- I16 = 6
- I32 = 8
- I64 = 10
- STRING = 11
- UTF7 = 11
- STRUCT = 12
- MAP = 13
- SET = 14
- LIST = 15
- UTF8 = 16
- UTF16 = 17
-	//BINARY = 18 (wrong and unused)
-)
-
-var typeNames = map[int]string{
- STOP: "STOP",
- VOID: "VOID",
- BOOL: "BOOL",
- BYTE: "BYTE",
- DOUBLE: "DOUBLE",
- I16: "I16",
- I32: "I32",
- I64: "I64",
- STRING: "STRING",
- STRUCT: "STRUCT",
- MAP: "MAP",
- SET: "SET",
- LIST: "LIST",
- UTF8: "UTF8",
- UTF16: "UTF16",
-}
-
-func (p TType) String() string {
- if s, ok := typeNames[int(p)]; ok {
- return s
- }
- return "Unknown"
-}
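
Usage sketch: TType satisfies fmt.Stringer, so values print by name, falling back to "Unknown":

    package main

    import (
    	"fmt"

    	"github.com/uber/jaeger-client-go/thrift"
    )

    func main() {
    	fmt.Println(thrift.TType(thrift.STRUCT)) // STRUCT
    	fmt.Println(thrift.TType(42))            // Unknown
    	// BYTE and I08 share the value 3, so both print as "BYTE":
    	fmt.Println(thrift.TType(thrift.I08)) // BYTE
    }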
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
deleted file mode 100644
index 9a627bed5a..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/tracer.go
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "fmt"
- "io"
- "math/rand"
- "os"
- "reflect"
- "strconv"
- "sync"
- "time"
-
- "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
-
- "github.com/uber/jaeger-client-go/internal/baggage"
- "github.com/uber/jaeger-client-go/internal/throttler"
- "github.com/uber/jaeger-client-go/log"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-// Tracer implements opentracing.Tracer.
-type Tracer struct {
- serviceName string
- hostIPv4 uint32 // this is for zipkin endpoint conversion
-
- sampler SamplerV2
- reporter Reporter
- metrics Metrics
- logger log.DebugLogger
-
- timeNow func() time.Time
- randomNumber func() uint64
-
- options struct {
- gen128Bit bool // whether to generate 128bit trace IDs
- zipkinSharedRPCSpan bool
- highTraceIDGenerator func() uint64 // custom high trace ID generator
- maxTagValueLength int
- noDebugFlagOnForcedSampling bool
- maxLogsPerSpan int
- // more options to come
- }
- // allocator of Span objects
- spanAllocator SpanAllocator
-
- injectors map[interface{}]Injector
- extractors map[interface{}]Extractor
-
- observer compositeObserver
-
- tags []Tag
- process Process
-
- baggageRestrictionManager baggage.RestrictionManager
- baggageSetter *baggageSetter
-
- debugThrottler throttler.Throttler
-}
-
-// NewTracer creates a Tracer implementation that reports tracing spans to Jaeger.
-// The returned io.Closer can be used in shutdown hooks to ensure that the internal
-// queue of the Reporter is drained and all buffered spans are submitted to collectors.
-// TODO (breaking change) return *Tracer only, without closer.
-func NewTracer(
- serviceName string,
- sampler Sampler,
- reporter Reporter,
- options ...TracerOption,
-) (opentracing.Tracer, io.Closer) {
- t := &Tracer{
- serviceName: serviceName,
- sampler: samplerV1toV2(sampler),
- reporter: reporter,
- injectors: make(map[interface{}]Injector),
- extractors: make(map[interface{}]Extractor),
- metrics: *NewNullMetrics(),
- spanAllocator: simpleSpanAllocator{},
- }
-
- for _, option := range options {
- option(t)
- }
-
- // register default injectors/extractors unless they are already provided via options
- textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
- t.addCodec(opentracing.TextMap, textPropagator, textPropagator)
-
- httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
- t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
-
- binaryPropagator := NewBinaryPropagator(t)
- t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)
-
- // TODO remove after TChannel supports OpenTracing
- interopPropagator := &jaegerTraceContextPropagator{tracer: t}
- t.addCodec(SpanContextFormat, interopPropagator, interopPropagator)
-
- zipkinPropagator := &zipkinPropagator{tracer: t}
- t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)
-
- if t.baggageRestrictionManager != nil {
- t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)
- } else {
- t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)
- }
- if t.debugThrottler == nil {
- t.debugThrottler = throttler.DefaultThrottler{}
- }
-
- if t.randomNumber == nil {
- seedGenerator := utils.NewRand(time.Now().UnixNano())
- pool := sync.Pool{
- New: func() interface{} {
- return rand.NewSource(seedGenerator.Int63())
- },
- }
-
- t.randomNumber = func() uint64 {
- generator := pool.Get().(rand.Source)
- number := uint64(generator.Int63())
- pool.Put(generator)
- return number
- }
- }
- if t.timeNow == nil {
- t.timeNow = time.Now
- }
- if t.logger == nil {
- t.logger = log.NullLogger
- }
- // Set tracer-level tags
- t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})
- if hostname, err := os.Hostname(); err == nil {
- t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})
- }
- if ipval, ok := t.getTag(TracerIPTagKey); ok {
- ipv4, err := utils.ParseIPToUint32(ipval.(string))
- if err != nil {
- t.hostIPv4 = 0
- t.logger.Error("Unable to convert the externally provided ip to uint32: " + err.Error())
- } else {
- t.hostIPv4 = ipv4
- }
- } else if ip, err := utils.HostIP(); err == nil {
- t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})
- t.hostIPv4 = utils.PackIPAsUint32(ip)
- } else {
- t.logger.Error("Unable to determine this host's IP address: " + err.Error())
- }
-
- if t.options.gen128Bit {
- if t.options.highTraceIDGenerator == nil {
- t.options.highTraceIDGenerator = t.randomNumber
- }
- } else if t.options.highTraceIDGenerator != nil {
- t.logger.Error("Overriding high trace ID generator but not generating " +
- "128 bit trace IDs, consider enabling the \"Gen128Bit\" option")
- }
- if t.options.maxTagValueLength == 0 {
- t.options.maxTagValueLength = DefaultMaxTagValueLength
- }
- t.process = Process{
- Service: serviceName,
- UUID: strconv.FormatUint(t.randomNumber(), 16),
- Tags: t.tags,
- }
- if throttler, ok := t.debugThrottler.(ProcessSetter); ok {
- throttler.SetProcess(t.process)
- }
-
- return t, t
-}
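
A minimal construction sketch; NewConstSampler and NewNullReporter are from this library's public API, and the service and operation names are placeholders:

    package main

    import (
    	"github.com/opentracing/opentracing-go"
    	"github.com/uber/jaeger-client-go"
    )

    func main() {
    	// "example-service" and "do-work" are placeholder names.
    	tracer, closer := jaeger.NewTracer(
    		"example-service",
    		jaeger.NewConstSampler(true), // sample every trace
    		jaeger.NewNullReporter(),     // discard spans; swap for a real reporter
    	)
    	defer closer.Close() // drains the reporter queue on shutdown

    	opentracing.SetGlobalTracer(tracer)
    	span := tracer.StartSpan("do-work")
    	span.Finish()
    }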
-
-// addCodec registers an injector and an extractor for the given propagation format, if not already defined.
-func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) {
- if _, ok := t.injectors[format]; !ok {
- t.injectors[format] = injector
- }
- if _, ok := t.extractors[format]; !ok {
- t.extractors[format] = extractor
- }
-}
-
-// StartSpan implements StartSpan() method of opentracing.Tracer.
-func (t *Tracer) StartSpan(
- operationName string,
- options ...opentracing.StartSpanOption,
-) opentracing.Span {
- sso := opentracing.StartSpanOptions{}
- for _, o := range options {
- o.Apply(&sso)
- }
- return t.startSpanWithOptions(operationName, sso)
-}
-
-func (t *Tracer) startSpanWithOptions(
- operationName string,
- options opentracing.StartSpanOptions,
-) opentracing.Span {
- if options.StartTime.IsZero() {
- options.StartTime = t.timeNow()
- }
-
-	// Reports whether the given span context is an empty reference
-	// or may be used as a parent / debug ID / baggage items source.
- isEmptyReference := func(ctx SpanContext) bool {
- return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0
- }
-
- var references []Reference
- var parent SpanContext
- var hasParent bool // need this because `parent` is a value, not reference
- var ctx SpanContext
- var isSelfRef bool
- for _, ref := range options.References {
- ctxRef, ok := ref.ReferencedContext.(SpanContext)
- if !ok {
- t.logger.Error(fmt.Sprintf(
- "Reference contains invalid type of SpanReference: %s",
- reflect.ValueOf(ref.ReferencedContext)))
- continue
- }
- if isEmptyReference(ctxRef) {
- continue
- }
-
- if ref.Type == selfRefType {
- isSelfRef = true
- ctx = ctxRef
- continue
- }
-
- if ctxRef.IsValid() {
- // we don't want an empty context that contains only a debug-id or baggage
- references = append(references, Reference{Type: ref.Type, Context: ctxRef})
- }
-
- if !hasParent {
- parent = ctxRef
- hasParent = ref.Type == opentracing.ChildOfRef
- }
- }
- if !hasParent && !isEmptyReference(parent) {
- // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from
- // the FollowFromRef as the parent
- hasParent = true
- }
-
- rpcServer := false
- if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok {
- rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
- }
-
- var internalTags []Tag
- newTrace := false
- if !isSelfRef {
- if !hasParent || !parent.IsValid() {
- newTrace = true
- ctx.traceID.Low = t.randomID()
- if t.options.gen128Bit {
- ctx.traceID.High = t.options.highTraceIDGenerator()
- }
- ctx.spanID = SpanID(ctx.traceID.Low)
- ctx.parentID = 0
- ctx.samplingState = &samplingState{
- localRootSpan: ctx.spanID,
- }
- if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
- ctx.samplingState.setDebugAndSampled()
- internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID})
- }
- } else {
- ctx.traceID = parent.traceID
- if rpcServer && t.options.zipkinSharedRPCSpan {
- // Support Zipkin's one-span-per-RPC model
- ctx.spanID = parent.spanID
- ctx.parentID = parent.parentID
- } else {
- ctx.spanID = SpanID(t.randomID())
- ctx.parentID = parent.spanID
- }
- ctx.samplingState = parent.samplingState
- if parent.remote {
- ctx.samplingState.setFinal()
- ctx.samplingState.localRootSpan = ctx.spanID
- }
- }
- if hasParent {
- // copy baggage items
- if l := len(parent.baggage); l > 0 {
- ctx.baggage = make(map[string]string, len(parent.baggage))
- for k, v := range parent.baggage {
- ctx.baggage[k] = v
- }
- }
- }
- }
-
- sp := t.newSpan()
- sp.context = ctx
- sp.tracer = t
- sp.operationName = operationName
- sp.startTime = options.StartTime
- sp.duration = 0
- sp.references = references
- sp.firstInProcess = rpcServer || sp.context.parentID == 0
-
- if !sp.context.isSamplingFinalized() {
- decision := t.sampler.OnCreateSpan(sp)
- sp.applySamplingDecision(decision, false)
- }
- sp.observer = t.observer.OnStartSpan(sp, operationName, options)
-
- if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 {
- if sp.tags == nil || cap(sp.tags) < tagsTotalLength {
- sp.tags = make([]Tag, 0, tagsTotalLength)
- }
- sp.tags = append(sp.tags, internalTags...)
- for k, v := range options.Tags {
- sp.setTagInternal(k, v, false)
- }
- }
- t.emitNewSpanMetrics(sp, newTrace)
- return sp
-}
-
-// Inject implements Inject() method of opentracing.Tracer
-func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error {
- c, ok := ctx.(SpanContext)
- if !ok {
- return opentracing.ErrInvalidSpanContext
- }
- if injector, ok := t.injectors[format]; ok {
- return injector.Inject(c, carrier)
- }
- return opentracing.ErrUnsupportedFormat
-}
-
-// Extract implements Extract() method of opentracing.Tracer
-func (t *Tracer) Extract(
- format interface{},
- carrier interface{},
-) (opentracing.SpanContext, error) {
- if extractor, ok := t.extractors[format]; ok {
- spanCtx, err := extractor.Extract(carrier)
- if err != nil {
- return nil, err // ensure returned spanCtx is nil
- }
- spanCtx.remote = true
- return spanCtx, nil
- }
- return nil, opentracing.ErrUnsupportedFormat
-}
-
-// Close releases all resources used by the Tracer and flushes any remaining buffered spans.
-func (t *Tracer) Close() error {
- t.logger.Debugf("closing tracer")
- t.reporter.Close()
- t.sampler.Close()
- if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
- _ = mgr.Close()
- }
- if throttler, ok := t.debugThrottler.(io.Closer); ok {
- _ = throttler.Close()
- }
- return nil
-}
-
-// Tags returns a slice of tracer-level tags.
-func (t *Tracer) Tags() []opentracing.Tag {
- tags := make([]opentracing.Tag, len(t.tags))
- for i, tag := range t.tags {
- tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value}
- }
- return tags
-}
-
-// getTag returns the value of the specified tag; if the tag does not exist, it returns nil.
-// TODO only used by tests, move there.
-func (t *Tracer) getTag(key string) (interface{}, bool) {
- for _, tag := range t.tags {
- if tag.key == key {
- return tag.value, true
- }
- }
- return nil, false
-}
-
-// newSpan returns an instance of a clean Span object.
-// If options.PoolSpans is true, the spans are retrieved from an object pool.
-func (t *Tracer) newSpan() *Span {
- return t.spanAllocator.Get()
-}
-
-// emitNewSpanMetrics generates metrics on the number of started spans and traces.
-// newTrace param: we cannot simply check for parentID==0 because in the Zipkin model the
-// server-side RPC span has the exact same trace/span/parent IDs as the
-// calling client-side span, but obviously the server-side span is
-// no longer a root span of the trace.
-func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
- if !sp.context.isSamplingFinalized() {
- t.metrics.SpansStartedDelayedSampling.Inc(1)
- if newTrace {
- t.metrics.TracesStartedDelayedSampling.Inc(1)
- }
- // joining a trace is not possible, because sampling decision inherited from upstream is final
- } else if sp.context.IsSampled() {
- t.metrics.SpansStartedSampled.Inc(1)
- if newTrace {
- t.metrics.TracesStartedSampled.Inc(1)
- } else if sp.firstInProcess {
- t.metrics.TracesJoinedSampled.Inc(1)
- }
- } else {
- t.metrics.SpansStartedNotSampled.Inc(1)
- if newTrace {
- t.metrics.TracesStartedNotSampled.Inc(1)
- } else if sp.firstInProcess {
- t.metrics.TracesJoinedNotSampled.Inc(1)
- }
- }
-}
-
-func (t *Tracer) reportSpan(sp *Span) {
- ctx := sp.SpanContext()
-
- if !ctx.isSamplingFinalized() {
- t.metrics.SpansFinishedDelayedSampling.Inc(1)
- } else if ctx.IsSampled() {
- t.metrics.SpansFinishedSampled.Inc(1)
- } else {
- t.metrics.SpansFinishedNotSampled.Inc(1)
- }
-
- // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
- // and then Release() it when no longer needed.
- // Otherwise, the span may be reused for another trace and its data may be overwritten.
- if ctx.IsSampled() {
- t.reporter.Report(sp)
- }
-
- sp.Release()
-}
-
-// randomID generates a random trace/span ID, using tracer.random() generator.
-// It never returns 0.
-func (t *Tracer) randomID() uint64 {
- val := t.randomNumber()
- for val == 0 {
- val = t.randomNumber()
- }
- return val
-}
-
-// (NB) span must hold the lock before making this call
-func (t *Tracer) setBaggage(sp *Span, key, value string) {
- t.baggageSetter.setBaggage(sp, key, value)
-}
-
-// (NB) span must hold the lock before making this call
-func (t *Tracer) isDebugAllowed(operation string) bool {
- return t.debugThrottler.IsAllowed(operation)
-}
-
-// Sampler returns the sampler given to the tracer at creation.
-func (t *Tracer) Sampler() SamplerV2 {
- return t.sampler
-}
-
-// SelfRef creates an opentracing compliant SpanReference from a jaeger
-// SpanContext. This is a factory function in order to encapsulate jaeger specific
-// types.
-func SelfRef(ctx SpanContext) opentracing.SpanReference {
- return opentracing.SpanReference{
- Type: selfRefType,
- ReferencedContext: ctx,
- }
-}
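Taken together, the codec registration in addCodec and the Inject/Extract methods above form the standard propagation round trip. Below is a minimal sketch of that round trip over HTTP headers; it assumes the package's NewTracer, NewConstSampler, and NewNullReporter constructors, which live in files not shown in this hunk.

package main

import (
	"fmt"
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// NewTracer registers the default TextMap/HTTPHeaders/Binary codecs via addCodec.
	tracer, closer := jaeger.NewTracer(
		"example-service",
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
	)
	defer closer.Close()

	span := tracer.StartSpan("parent-op")

	// Inject the span context into an HTTP header carrier...
	headers := http.Header{}
	carrier := opentracing.HTTPHeadersCarrier(headers)
	if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
		panic(err)
	}
	span.Finish()

	// ...and extract it back. Extract marks the returned context as remote, which
	// finalizes the inherited sampling decision for children (see startSpanWithOptions).
	spanCtx, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
	if err != nil {
		panic(err)
	}
	child := tracer.StartSpan("child-op", opentracing.ChildOf(spanCtx))
	child.Finish()
	fmt.Println("propagation round trip ok")
}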
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
deleted file mode 100644
index 16b4606564..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "time"
-
- "github.com/opentracing/opentracing-go"
-
- "github.com/uber/jaeger-client-go/internal/baggage"
- "github.com/uber/jaeger-client-go/internal/throttler"
- "github.com/uber/jaeger-client-go/log"
-)
-
-// TracerOption is a function that sets some option on the tracer
-type TracerOption func(tracer *Tracer)
-
-// TracerOptions is a factory for all available TracerOption's.
-var TracerOptions TracerOptionsFactory
-
-// TracerOptionsFactory is a struct that defines functions for all available TracerOption's.
-type TracerOptionsFactory struct{}
-
-// Metrics creates a TracerOption that initializes Metrics on the tracer,
-// which is used to emit statistics.
-func (TracerOptionsFactory) Metrics(m *Metrics) TracerOption {
- return func(tracer *Tracer) {
- tracer.metrics = *m
- }
-}
-
-// Logger creates a TracerOption that gives the tracer a Logger.
-func (TracerOptionsFactory) Logger(logger Logger) TracerOption {
- return func(tracer *Tracer) {
- tracer.logger = log.DebugLogAdapter(logger)
- }
-}
-
-// CustomHeaderKeys allows overriding the default HTTP header keys used to propagate
-// tracing context.
-func (TracerOptionsFactory) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
- return func(tracer *Tracer) {
- if headerKeys == nil {
- return
- }
- textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
- tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator)
-
- httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
- tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
- }
-}
-
-// TimeNow creates a TracerOption that gives the tracer a function
-// used to generate timestamps for spans.
-func (TracerOptionsFactory) TimeNow(timeNow func() time.Time) TracerOption {
- return func(tracer *Tracer) {
- tracer.timeNow = timeNow
- }
-}
-
-// RandomNumber creates a TracerOption that gives the tracer
-// a thread-safe random number generator function for generating trace IDs.
-func (TracerOptionsFactory) RandomNumber(randomNumber func() uint64) TracerOption {
- return func(tracer *Tracer) {
- tracer.randomNumber = randomNumber
- }
-}
-
-// PoolSpans creates a TracerOption that tells the tracer whether it should use
-// an object pool to minimize span allocations.
-// This should be used with care, only if the service is not running any async tasks
-// that can access parent spans after those spans have been finished.
-func (TracerOptionsFactory) PoolSpans(poolSpans bool) TracerOption {
- return func(tracer *Tracer) {
- if poolSpans {
- tracer.spanAllocator = newSyncPollSpanAllocator()
- } else {
- tracer.spanAllocator = simpleSpanAllocator{}
- }
- }
-}
-
-// HostIPv4 creates a TracerOption that identifies the current service/process.
-// If not set, the factory method will obtain the current IP address.
-// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
-//
-// Deprecated.
-func (TracerOptionsFactory) HostIPv4(hostIPv4 uint32) TracerOption {
- return func(tracer *Tracer) {
- tracer.hostIPv4 = hostIPv4
- }
-}
-
-// Injector registers an Injector for the given format.
-func (TracerOptionsFactory) Injector(format interface{}, injector Injector) TracerOption {
- return func(tracer *Tracer) {
- tracer.injectors[format] = injector
- }
-}
-
-// Extractor registers an Extractor for the given format.
-func (TracerOptionsFactory) Extractor(format interface{}, extractor Extractor) TracerOption {
- return func(tracer *Tracer) {
- tracer.extractors[format] = extractor
- }
-}
-
-// Observer registers an Observer.
-func (t TracerOptionsFactory) Observer(observer Observer) TracerOption {
- return t.ContribObserver(&oldObserver{obs: observer})
-}
-
-// ContribObserver registers a ContribObserver.
-func (TracerOptionsFactory) ContribObserver(observer ContribObserver) TracerOption {
- return func(tracer *Tracer) {
- tracer.observer.append(observer)
- }
-}
-
-// Gen128Bit enables generation of 128bit trace IDs.
-func (TracerOptionsFactory) Gen128Bit(gen128Bit bool) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.gen128Bit = gen128Bit
- }
-}
-
-// NoDebugFlagOnForcedSampling turns off setting the debug flag in the trace context
-// when the trace is force-started via sampling=1 span tag.
-func (TracerOptionsFactory) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
- }
-}
-
-// HighTraceIDGenerator allows overriding the generator used for the high 64 bits of trace IDs.
-func (TracerOptionsFactory) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.highTraceIDGenerator = highTraceIDGenerator
- }
-}
-
-// MaxTagValueLength sets the limit on the max length of tag values.
-func (TracerOptionsFactory) MaxTagValueLength(maxTagValueLength int) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.maxTagValueLength = maxTagValueLength
- }
-}
-
-// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
-// value). If a span has more logs than this value, logs are dropped as
-// necessary (and replaced with a log describing how many were dropped).
-//
-// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
-// half are the newest logs.
-func (TracerOptionsFactory) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.maxLogsPerSpan = maxLogsPerSpan
- }
-}
-
-// ZipkinSharedRPCSpan enables a mode where the server-side span shares the span ID
-// of the client span from the incoming request, for compatibility with Zipkin's
-// "one span per RPC" model.
-func (TracerOptionsFactory) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
- return func(tracer *Tracer) {
- tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
- }
-}
-
-// Tag adds a tracer-level tag that will be added to all spans.
-func (TracerOptionsFactory) Tag(key string, value interface{}) TracerOption {
- return func(tracer *Tracer) {
- tracer.tags = append(tracer.tags, Tag{key: key, value: value})
- }
-}
-
-// BaggageRestrictionManager registers BaggageRestrictionManager.
-func (TracerOptionsFactory) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
- return func(tracer *Tracer) {
- tracer.baggageRestrictionManager = mgr
- }
-}
-
-// DebugThrottler registers a Throttler for debug spans.
-func (TracerOptionsFactory) DebugThrottler(throttler throttler.Throttler) TracerOption {
- return func(tracer *Tracer) {
- tracer.debugThrottler = throttler
- }
-}
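These options are passed variadically to the tracer constructor. A short sketch composing several of them follows; it again assumes NewTracer, NewConstSampler, and NewNullReporter from elsewhere in this package, while log.StdLogger comes from the vendored log subpackage shown later in this patch.

package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	jlog "github.com/uber/jaeger-client-go/log"
)

func main() {
	tracer, closer := jaeger.NewTracer(
		"example-service",
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.Logger(jlog.StdLogger),      // wrapped by log.DebugLogAdapter
		jaeger.TracerOptions.Gen128Bit(true),             // high bits come from highTraceIDGenerator
		jaeger.TracerOptions.MaxTagValueLength(512),      // overrides DefaultMaxTagValueLength
		jaeger.TracerOptions.Tag("deployment", "canary"), // tracer-level tag added to the Process
	)
	defer closer.Close()

	span := tracer.StartSpan("startup")
	span.Finish()
}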
diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go
deleted file mode 100644
index c5f5b19551..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/transport.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "io"
-)
-
-// Transport abstracts the method of sending spans out of process.
-// Implementations are NOT required to be thread-safe; the RemoteReporter
-// is expected to only call methods on the Transport from the same goroutine.
-type Transport interface {
- // Append converts the span to the wire representation and adds it
- // to sender's internal buffer. If the buffer exceeds its designated
- // size, the transport should call Flush() and return the number of spans
-// flushed, otherwise return 0. If an error is returned, the returned number
-// of spans is treated as failed spans and reported to metrics accordingly.
- Append(span *Span) (int, error)
-
- // Flush submits the internal buffer to the remote server. It returns the
-// number of spans flushed. If an error is returned, the returned number of
-// spans is treated as failed spans and reported to metrics accordingly.
- Flush() (int, error)
-
- io.Closer
-}
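Because the contract above is small, a test double is easy to write. Here is a minimal sketch of an in-memory Transport satisfying this interface; per the doc comment it does not need to be thread-safe, since the RemoteReporter drives it from a single goroutine.

package example

import jaeger "github.com/uber/jaeger-client-go"

// memoryTransport buffers spans in memory and "flushes" them into a slice,
// useful as a test double for the Transport interface.
type memoryTransport struct {
	batchCap int
	buffer   []*jaeger.Span
	flushed  []*jaeger.Span
}

var _ jaeger.Transport = (*memoryTransport)(nil)

// Append buffers the span and flushes once batchCap is reached, returning
// the number of spans flushed, as the interface requires.
func (t *memoryTransport) Append(span *jaeger.Span) (int, error) {
	t.buffer = append(t.buffer, span)
	if len(t.buffer) >= t.batchCap {
		return t.Flush()
	}
	return 0, nil
}

// Flush moves buffered spans into the flushed slice and reports the count.
func (t *memoryTransport) Flush() (int, error) {
	n := len(t.buffer)
	t.flushed = append(t.flushed, t.buffer...)
	t.buffer = t.buffer[:0]
	return n, nil
}

func (t *memoryTransport) Close() error { return nil }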
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
deleted file mode 100644
index 6b961fb637..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/transport/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package transport defines various transports that can be used with
-// RemoteReporter to send spans out of process. Transport is responsible
-// for serializing the spans into a specific format suitable for sending
-// to the tracing backend. Examples may include Thrift over UDP, Thrift
-// or JSON over HTTP, Thrift over Kafka, etc.
-//
-// Implementations are NOT required to be thread-safe; the RemoteReporter
-// is expected to only call methods on the Transport from the same goroutine.
-package transport
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go
deleted file mode 100644
index 1d6f14d328..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/transport/http.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/uber/jaeger-client-go/thrift"
-
- "github.com/uber/jaeger-client-go"
- j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
-)
-
-// Default timeout for HTTP requests sent to the collector.
-const defaultHTTPTimeout = time.Second * 5
-
-// HTTPTransport implements Transport by forwarding spans to an HTTP server.
-type HTTPTransport struct {
- url string
- client *http.Client
- batchSize int
- spans []*j.Span
- process *j.Process
- httpCredentials *HTTPBasicAuthCredentials
- headers map[string]string
-}
-
-// HTTPBasicAuthCredentials stores credentials for HTTP basic auth.
-type HTTPBasicAuthCredentials struct {
- username string
- password string
-}
-
-// HTTPOption sets a parameter for the HTTPTransport.
-type HTTPOption func(c *HTTPTransport)
-
-// HTTPTimeout sets maximum timeout for http request.
-func HTTPTimeout(duration time.Duration) HTTPOption {
- return func(c *HTTPTransport) { c.client.Timeout = duration }
-}
-
-// HTTPBatchSize sets the maximum batch size, after which a flush will be
-// triggered. The default batch size is 100 spans.
-func HTTPBatchSize(n int) HTTPOption {
- return func(c *HTTPTransport) { c.batchSize = n }
-}
-
-// HTTPBasicAuth sets the credentials required to perform HTTP basic auth
-func HTTPBasicAuth(username string, password string) HTTPOption {
- return func(c *HTTPTransport) {
- c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password}
- }
-}
-
-// HTTPRoundTripper configures the underlying Transport on the *http.Client
-// that is used
-func HTTPRoundTripper(transport http.RoundTripper) HTTPOption {
- return func(c *HTTPTransport) {
- c.client.Transport = transport
- }
-}
-
-// HTTPHeaders defines the HTTP headers that will be attached to the jaeger client's HTTP request
-func HTTPHeaders(headers map[string]string) HTTPOption {
- return func(c *HTTPTransport) {
- c.headers = headers
- }
-}
-
-// NewHTTPTransport returns a new HTTP-backed transport. url should be the HTTP
-// URL of the collector endpoint that handles POST requests, typically something like:
-// http://hostname:14268/api/traces?format=jaeger.thrift
-func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport {
- c := &HTTPTransport{
- url: url,
- client: &http.Client{Timeout: defaultHTTPTimeout},
- batchSize: 100,
- spans: []*j.Span{},
- }
-
- for _, option := range options {
- option(c)
- }
- return c
-}
-
-// Append implements Transport.
-func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) {
- if c.process == nil {
- c.process = jaeger.BuildJaegerProcessThrift(span)
- }
- jSpan := jaeger.BuildJaegerThrift(span)
- c.spans = append(c.spans, jSpan)
- if len(c.spans) >= c.batchSize {
- return c.Flush()
- }
- return 0, nil
-}
-
-// Flush implements Transport.
-func (c *HTTPTransport) Flush() (int, error) {
- count := len(c.spans)
- if count == 0 {
- return 0, nil
- }
- err := c.send(c.spans)
- c.spans = c.spans[:0]
- return count, err
-}
-
-// Close implements Transport.
-func (c *HTTPTransport) Close() error {
- return nil
-}
-
-func (c *HTTPTransport) send(spans []*j.Span) error {
- batch := &j.Batch{
- Spans: spans,
- Process: c.process,
- }
- body, err := serializeThrift(batch)
- if err != nil {
- return err
- }
- req, err := http.NewRequest("POST", c.url, body)
- if err != nil {
- return err
- }
- req.Header.Set("Content-Type", "application/x-thrift")
- for k, v := range c.headers {
- req.Header.Set(k, v)
- }
-
- if c.httpCredentials != nil {
- req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password)
- }
-
- resp, err := c.client.Do(req)
- if err != nil {
- return err
- }
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
- if resp.StatusCode >= http.StatusBadRequest {
- return fmt.Errorf("error from collector: %d", resp.StatusCode)
- }
- return nil
-}
-
-func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) {
- t := thrift.NewTMemoryBuffer()
- p := thrift.NewTBinaryProtocolTransport(t)
- if err := obj.Write(context.Background(), p); err != nil {
- return nil, err
- }
- return t.Buffer, nil
-}
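A usage sketch for the HTTP transport above, wired into a tracer via NewRemoteReporter; that constructor, together with NewTracer and NewConstSampler, is assumed from files of this package not shown in this hunk, and the collector URL is illustrative.

package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-client-go/transport"
)

func main() {
	sender := transport.NewHTTPTransport(
		"http://localhost:14268/api/traces?format=jaeger.thrift", // illustrative collector URL
		transport.HTTPBatchSize(10),               // flush every 10 spans instead of 100
		transport.HTTPTimeout(2*time.Second),      // overrides the 5s default
		transport.HTTPBasicAuth("user", "secret"), // adds basic-auth credentials to each POST
	)

	// NewRemoteReporter drives Append/Flush from a single goroutine,
	// matching the Transport thread-safety contract.
	reporter := jaeger.NewRemoteReporter(sender)

	tracer, closer := jaeger.NewTracer("example-service", jaeger.NewConstSampler(true), reporter)
	defer closer.Close() // Close flushes the reporter, which flushes the transport

	tracer.StartSpan("demo").Finish()
}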
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
deleted file mode 100644
index 00004124c8..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/uber/jaeger-client-go/internal/reporterstats"
- "github.com/uber/jaeger-client-go/log"
- "github.com/uber/jaeger-client-go/thrift"
- j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-// Empirically obtained constant for how many bytes in the message are used for envelope.
-// The total datagram size is:
-// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize
-//
-// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans
-// in the batch, because the length of the list is encoded as varint32, as well as SeqId.
-//
-// There is a unit test `TestEmitBatchOverhead` that validates this number, it fails at <68.
-const emitBatchOverhead = 70
-
-var errSpanTooLarge = errors.New("span is too large")
-
-type udpSender struct {
- client *utils.AgentClientUDP
- maxPacketSize int // max size of datagram in bytes
- maxSpanBytes int // max number of bytes to record spans (excluding envelope) in the datagram
- byteBufferSize int // current number of span bytes accumulated in the buffer
- spanBuffer []*j.Span // spans buffered before a flush
- thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
- thriftProtocol thrift.TProtocol
- process *j.Process
- processByteSize int
-
- // reporterStats provides access to stats that are only known to Reporter
- reporterStats reporterstats.ReporterStats
-
- // The following counters are always non-negative, but we need to send them in signed i64 Thrift fields,
- // so we keep them as signed. At 10k QPS, overflow happens in about 300 million years.
- batchSeqNo int64
- tooLargeDroppedSpans int64
- failedToEmitSpans int64
-}
-
-// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should
-// be passed to NewUDPTransportWithParams.
-type UDPTransportParams struct {
- utils.AgentClientUDPParams
-}
-
-// NewUDPTransportWithParams creates a reporter that submits spans to jaeger-agent.
-// TODO: (breaking change) move to transport/ package.
-func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) {
- if len(params.HostPort) == 0 {
- params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
- }
-
- if params.Logger == nil {
- params.Logger = log.StdLogger
- }
-
- if params.MaxPacketSize == 0 {
- params.MaxPacketSize = utils.UDPPacketMaxLength
- }
-
- protocolFactory := thrift.NewTCompactProtocolFactory()
-
- // Each span is first written to thriftBuffer to determine its size in bytes.
- thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
- thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
-
- client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams)
- if err != nil {
- return nil, err
- }
-
- return &udpSender{
- client: client,
- maxSpanBytes: params.MaxPacketSize - emitBatchOverhead,
- thriftBuffer: thriftBuffer,
- thriftProtocol: thriftProtocol,
- }, nil
-}
-
-// NewUDPTransport creates a reporter that submits spans to jaeger-agent.
-// TODO: (breaking change) move to transport/ package.
-func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
- return NewUDPTransportWithParams(UDPTransportParams{
- AgentClientUDPParams: utils.AgentClientUDPParams{
- HostPort: hostPort,
- MaxPacketSize: maxPacketSize,
- },
- })
-}
-
-// SetReporterStats implements reporterstats.Receiver.
-func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) {
- s.reporterStats = rs
-}
-
-func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
- s.thriftBuffer.Reset()
- _ = thriftStruct.Write(context.Background(), s.thriftProtocol)
- return s.thriftBuffer.Len()
-}
-
-func (s *udpSender) Append(span *Span) (int, error) {
- if s.process == nil {
- s.process = BuildJaegerProcessThrift(span)
- s.processByteSize = s.calcSizeOfSerializedThrift(s.process)
- s.byteBufferSize += s.processByteSize
- }
- jSpan := BuildJaegerThrift(span)
- spanSize := s.calcSizeOfSerializedThrift(jSpan)
- if spanSize > s.maxSpanBytes {
- s.tooLargeDroppedSpans++
- return 1, errSpanTooLarge
- }
-
- s.byteBufferSize += spanSize
- if s.byteBufferSize <= s.maxSpanBytes {
- s.spanBuffer = append(s.spanBuffer, jSpan)
- if s.byteBufferSize < s.maxSpanBytes {
- return 0, nil
- }
- return s.Flush()
- }
- // the latest span did not fit in the buffer
- n, err := s.Flush()
- s.spanBuffer = append(s.spanBuffer, jSpan)
- s.byteBufferSize = spanSize + s.processByteSize
- return n, err
-}
-
-func (s *udpSender) Flush() (int, error) {
- n := len(s.spanBuffer)
- if n == 0 {
- return 0, nil
- }
- s.batchSeqNo++
- batchSeqNo := int64(s.batchSeqNo)
- err := s.client.EmitBatch(context.Background(), &j.Batch{
- Process: s.process,
- Spans: s.spanBuffer,
- SeqNo: &batchSeqNo,
- Stats: s.makeStats(),
- })
- s.resetBuffers()
- if err != nil {
- s.failedToEmitSpans += int64(n)
- }
- return n, err
-}
-
-func (s *udpSender) Close() error {
- return s.client.Close()
-}
-
-func (s *udpSender) resetBuffers() {
- for i := range s.spanBuffer {
- s.spanBuffer[i] = nil
- }
- s.spanBuffer = s.spanBuffer[:0]
- s.byteBufferSize = s.processByteSize
-}
-
-func (s *udpSender) makeStats() *j.ClientStats {
- var dropped int64
- if s.reporterStats != nil {
- dropped = s.reporterStats.SpansDroppedFromQueue()
- }
- return &j.ClientStats{
- FullQueueDroppedSpans: dropped,
- TooLargeDroppedSpans: s.tooLargeDroppedSpans,
- FailedToEmitSpans: s.failedToEmitSpans,
- }
-}
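The UDP sender is constructed the same way. A brief sketch using the simple NewUDPTransport variant; as the code above shows, passing 0 falls back to the default packet size, and the agent address here is illustrative (6831 is the conventional compact-thrift port). NewRemoteReporter and the tracer constructors are again assumed from other files in this package.

package main

import (
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// 0 for maxPacketSize falls back to utils.UDPPacketMaxLength (65000).
	sender, err := jaeger.NewUDPTransport("127.0.0.1:6831", 0)
	if err != nil {
		panic(err)
	}

	reporter := jaeger.NewRemoteReporter(sender)
	tracer, closer := jaeger.NewTracer("example-service", jaeger.NewConstSampler(true), reporter)
	defer closer.Close()

	tracer.StartSpan("demo").Finish()
}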
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
deleted file mode 100644
index 237211f822..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
-)
-
-// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`.
-func GetJSON(url string, out interface{}) error {
- resp, err := http.Get(url)
- if err != nil {
- return err
- }
- return ReadJSON(resp, out)
-}
-
-// ReadJSON reads JSON from http.Response and parses it into `out`
-func ReadJSON(resp *http.Response, out interface{}) error {
- defer resp.Body.Close()
-
- if resp.StatusCode >= 400 {
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return err
- }
-
- return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
- }
-
- if out == nil {
- io.Copy(ioutil.Discard, resp.Body)
- return nil
- }
-
- decoder := json.NewDecoder(resp.Body)
- return decoder.Decode(out)
-}
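A usage sketch for GetJSON. The URL and response shape are illustrative: the agent's HTTP sampling endpoint conventionally listens on port 5778, but nothing in this file depends on that.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	// Decode an illustrative JSON response into a typed value; errors cover
	// both transport failures and non-2xx statuses (surfaced with the body).
	var out struct {
		StrategyType string `json:"strategyType"`
	}
	if err := utils.GetJSON("http://localhost:5778/sampling?service=example", &out); err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Println("strategy:", out.StrategyType)
}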
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
deleted file mode 100644
index b51af7713f..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/utils/localip.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "errors"
- "net"
-)
-
-// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go
-
-// scoreAddr scores how likely the given addr is to be a remote address and returns the
-// IP to use when listening. Any address which receives a negative score should not be used.
-// Scores are calculated as:
-// -1 for any unknown IP addresses.
-// +300 for IPv4 addresses.
-// +100 for non-local addresses, with an extra +100 for "up" interfaces.
-func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) {
- var ip net.IP
- if netAddr, ok := addr.(*net.IPNet); ok {
- ip = netAddr.IP
- } else if netIP, ok := addr.(*net.IPAddr); ok {
- ip = netIP.IP
- } else {
- return -1, nil
- }
-
- var score int
- if ip.To4() != nil {
- score += 300
- }
- if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() {
- score += 100
- if iface.Flags&net.FlagUp != 0 {
- score += 100
- }
- }
- return score, ip
-}
-
-// HostIP tries to find an IP that can be used by other machines to reach this machine.
-func HostIP() (net.IP, error) {
- interfaces, err := net.Interfaces()
- if err != nil {
- return nil, err
- }
-
- bestScore := -1
- var bestIP net.IP
- // Select the highest scoring IP as the best IP.
- for _, iface := range interfaces {
- addrs, err := iface.Addrs()
- if err != nil {
- // Skip this interface if there is an error.
- continue
- }
-
- for _, addr := range addrs {
- score, ip := scoreAddr(iface, addr)
- if score > bestScore {
- bestScore = score
- bestIP = ip
- }
- }
- }
-
- if bestScore == -1 {
- return nil, errors.New("no addresses to listen on")
- }
-
- return bestIP, nil
-}
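A sketch of how the scoring plays out in practice: an interface that is up and non-loopback with an IPv4 address scores 300+100+100 = 500, so HostIP prefers routable IPv4 addresses over loopback and IPv6. PackIPAsUint32 comes from this package's utils.go, shown later in this patch.

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	ip, err := utils.HostIP()
	if err != nil {
		fmt.Println("no usable address:", err)
		return
	}
	// The tracer uses this address for the TracerIPTagKey tag and hostIPv4 field.
	fmt.Printf("host IP %s packs to %d\n", ip, utils.PackIPAsUint32(ip))
}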
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
deleted file mode 100644
index 9875f7f55c..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/utils/rand.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "math/rand"
- "sync"
-)
-
-// lockedSource allows a random number generator to be used by multiple goroutines concurrently.
-// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed.
-type lockedSource struct {
- mut sync.Mutex
- src rand.Source
-}
-
-// NewRand returns a rand.Rand that is thread-safe.
-func NewRand(seed int64) *rand.Rand {
- return rand.New(&lockedSource{src: rand.NewSource(seed)})
-}
-
-func (r *lockedSource) Int63() (n int64) {
- r.mut.Lock()
- n = r.src.Int63()
- r.mut.Unlock()
- return
-}
-
-// Seed implements Seed() of Source
-func (r *lockedSource) Seed(seed int64) {
- r.mut.Lock()
- r.src.Seed(seed)
- r.mut.Unlock()
-}
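A sketch demonstrating why the locked source matters: the returned rand.Rand can be shared across goroutines, which a bare rand.NewSource cannot.

package main

import (
	"sync"
	"time"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	rnd := utils.NewRand(time.Now().UnixNano())
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				_ = rnd.Int63() // safe: the underlying Source is guarded by a mutex
			}
		}()
	}
	wg.Wait()
}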
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
deleted file mode 100644
index bf2f13165b..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "sync"
- "time"
-)
-
-// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits.
-//
-// TODO (breaking change) remove this interface in favor of public struct below
-//
-// Deprecated, use ReconfigurableRateLimiter.
-type RateLimiter interface {
- CheckCredit(itemCost float64) bool
-}
-
-// ReconfigurableRateLimiter is a rate limiter based on the leaky bucket algorithm, formulated in terms of a
-// credits balance that is replenished every time the CheckCredit() method is called (tick) by an amount proportional
-// to the time elapsed since the last tick, up to a max of creditsPerSecond. A call to CheckCredit() takes the cost
-// of an item we want to pay for with the balance. If the balance exceeds the cost of the item, the item is "purchased"
-// and the balance reduced, indicated by a returned value of true. Otherwise, the balance is unchanged and false is returned.
-//
-// This can be used to limit a rate of messages emitted by a service by instantiating the Rate Limiter with the
-// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message
-// to determine if the message is within the rate limit.
-//
-// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput
-// as bytes/second, and calling CheckCredit() with the actual message size.
-//
-// TODO (breaking change) rename to RateLimiter once the interface is removed
-type ReconfigurableRateLimiter struct {
- lock sync.Mutex
-
- creditsPerSecond float64
- balance float64
- maxBalance float64
- lastTick time.Time
-
- timeNow func() time.Time
-}
-
-// NewRateLimiter creates a new ReconfigurableRateLimiter.
-func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
- return &ReconfigurableRateLimiter{
- creditsPerSecond: creditsPerSecond,
- balance: maxBalance,
- maxBalance: maxBalance,
- lastTick: time.Now(),
- timeNow: time.Now,
- }
-}
-
-// CheckCredit tries to reduce the current balance by itemCost, provided that the current balance
-// is not less than itemCost.
-func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
- rl.lock.Lock()
- defer rl.lock.Unlock()
-
- // if we have enough credits to pay for current item, then reduce balance and allow
- if rl.balance >= itemCost {
- rl.balance -= itemCost
- return true
- }
- // otherwise check if balance can be increased due to time elapsed, and try again
- rl.updateBalance()
- if rl.balance >= itemCost {
- rl.balance -= itemCost
- return true
- }
- return false
-}
-
-// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
-func (rl *ReconfigurableRateLimiter) updateBalance() {
- // calculate how much time passed since the last tick, and update current tick
- currentTime := rl.timeNow()
- elapsedTime := currentTime.Sub(rl.lastTick)
- rl.lastTick = currentTime
- // calculate how much credit we have accumulated since the last tick
- rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
- if rl.balance > rl.maxBalance {
- rl.balance = rl.maxBalance
- }
-}
-
-// Update changes the main parameters of the rate limiter in-place, while retaining
-// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
-// instead of creating a new rate limiter helps to avoid thundering herd when sampling
-// strategies are updated.
-func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) {
- rl.lock.Lock()
- defer rl.lock.Unlock()
-
- rl.updateBalance() // get up to date balance
- rl.balance = rl.balance * maxBalance / rl.maxBalance
- rl.creditsPerSecond = creditsPerSecond
- rl.maxBalance = maxBalance
-}
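A worked example of the credit arithmetic, assuming a fresh limiter: the balance starts at maxBalance and is only replenished inside CheckCredit.

package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	// 2 credits per second, max balance (burst) of 2; the balance starts full.
	lim := utils.NewRateLimiter(2.0, 2.0)
	fmt.Println(lim.CheckCredit(1.0)) // true: balance 2.0 -> 1.0
	fmt.Println(lim.CheckCredit(1.0)) // true: balance 1.0 -> 0.0
	fmt.Println(lim.CheckCredit(1.0)) // false: balance exhausted

	time.Sleep(600 * time.Millisecond) // ~1.2 credits accrue at 2 credits/sec
	fmt.Println(lim.CheckCredit(1.0))  // true: replenished balance covers the cost

	// Update doubles throughput in place while pro-rating the remaining balance,
	// avoiding a thundering herd when sampling strategies change.
	lim.Update(4.0, 4.0)
}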
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
deleted file mode 100644
index 0dffc7fa24..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright (c) 2020 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "fmt"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/uber/jaeger-client-go/log"
-)
-
-// reconnectingUDPConn is an implementation of udpConn that re-resolves hostPort every resolveTimeout; if the resolved address
-// differs from the current conn's address, the new address is dialed and the conn is swapped.
-type reconnectingUDPConn struct {
- hostPort string
- resolveFunc resolveFunc
- dialFunc dialFunc
- logger log.Logger
- bufferBytes int64
-
- connMtx sync.RWMutex
- conn *net.UDPConn
- destAddr *net.UDPAddr
- closeChan chan struct{}
-}
-
-type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error)
-type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error)
-
-// newReconnectingUDPConn returns a new udpConn that re-resolves hostPort every resolveTimeout; if the resolved address
-// differs from the current conn's address, the new address is dialed and the conn is swapped.
-func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) {
- conn := &reconnectingUDPConn{
- hostPort: hostPort,
- resolveFunc: resolveFunc,
- dialFunc: dialFunc,
- logger: logger,
- closeChan: make(chan struct{}),
- }
-
- if err := conn.attemptResolveAndDial(); err != nil {
- logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout))
- }
-
- go conn.reconnectLoop(resolveTimeout)
-
- return conn, nil
-}
-
-func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
- ticker := time.NewTicker(resolveTimeout)
- defer ticker.Stop()
-
- for {
- select {
- case <-c.closeChan:
- return
- case <-ticker.C:
- if err := c.attemptResolveAndDial(); err != nil {
- c.logger.Error(err.Error())
- }
- }
- }
-}
-
-func (c *reconnectingUDPConn) attemptResolveAndDial() error {
- newAddr, err := c.resolveFunc("udp", c.hostPort)
- if err != nil {
- return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
- }
-
- c.connMtx.RLock()
- curAddr := c.destAddr
- c.connMtx.RUnlock()
-
- // don't attempt to dial if an addr was successfully dialed previously and the resolved addr is the same as the current conn's
- if curAddr != nil && newAddr.String() == curAddr.String() {
- return nil
- }
-
- if err := c.attemptDialNewAddr(newAddr); err != nil {
- return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
- }
-
- return nil
-}
-
-func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
- connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
- if err != nil {
- return err
- }
-
- if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
- if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
- return err
- }
- }
-
- c.connMtx.Lock()
- c.destAddr = newAddr
- // store prev to close later
- prevConn := c.conn
- c.conn = connUDP
- c.connMtx.Unlock()
-
- if prevConn != nil {
- return prevConn.Close()
- }
-
- return nil
-}
-
-// Write calls net.UDPConn.Write; if it fails, an attempt is made to connect to a new addr, and if that succeeds the write is retried before returning.
-func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
- var bytesWritten int
- var err error
-
- c.connMtx.RLock()
- if c.conn == nil {
- // if the connection is not initialized, indicate this with an error in order to hook into the retry logic
- err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
- } else {
- bytesWritten, err = c.conn.Write(b)
- }
- c.connMtx.RUnlock()
-
- if err == nil {
- return bytesWritten, nil
- }
-
- // attempt to resolve and dial a new address in case that's the problem; if that succeeds, try the write again
- if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
- c.connMtx.RLock()
- defer c.connMtx.RUnlock()
- return c.conn.Write(b)
- }
-
- // return original error if reconn fails
- return bytesWritten, err
-}
-
-// Close stops the reconnectLoop, then closes the connection via net.UDPConn's implementation.
-func (c *reconnectingUDPConn) Close() error {
- close(c.closeChan)
-
- // acquire rw lock before closing conn to ensure calls to Write drain
- c.connMtx.Lock()
- defer c.connMtx.Unlock()
-
- if c.conn != nil {
- return c.conn.Close()
- }
-
- return nil
-}
-
-// SetWriteBuffer defers to the net.UDPConn SetWriteBuffer implementation wrapped with an RLock. If no conn is currently
-// held and SetWriteBuffer is called, bufferBytes is stored and applied to newly dialed conns.
-func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error {
- var err error
-
- c.connMtx.RLock()
- if c.conn != nil {
- err = c.conn.SetWriteBuffer(bytes)
- }
- c.connMtx.RUnlock()
-
- if err == nil {
- atomic.StoreInt64(&c.bufferBytes, int64(bytes))
- }
-
- return err
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
deleted file mode 100644
index 4c59ae9dd8..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net"
- "time"
-
- "github.com/uber/jaeger-client-go/log"
- "github.com/uber/jaeger-client-go/thrift"
-
- "github.com/uber/jaeger-client-go/thrift-gen/agent"
- "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
- "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
-)
-
-// UDPPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent
-const UDPPacketMaxLength = 65000
-
-// AgentClientUDP is a UDP client to Jaeger agent that implements agent.Agent interface.
-type AgentClientUDP struct {
- agent.Agent
- io.Closer
-
- connUDP udpConn
- client *agent.AgentClient
- maxPacketSize int // max size of datagram in bytes
- thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
-}
-
-type udpConn interface {
- Write([]byte) (int, error)
- SetWriteBuffer(int) error
- Close() error
-}
-
-// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should
-// be passed to NewAgentClientUDPWithParams.
-type AgentClientUDPParams struct {
- HostPort string
- MaxPacketSize int
- Logger log.Logger
- DisableAttemptReconnecting bool
- AttemptReconnectInterval time.Duration
-}
-
-// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP.
-func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) {
- // validate hostport
- if _, _, err := net.SplitHostPort(params.HostPort); err != nil {
- return nil, err
- }
-
- if params.MaxPacketSize == 0 {
- params.MaxPacketSize = UDPPacketMaxLength
- }
-
- if params.Logger == nil {
- params.Logger = log.StdLogger
- }
-
- if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 {
- params.AttemptReconnectInterval = time.Second * 30
- }
-
- thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
- protocolFactory := thrift.NewTCompactProtocolFactory()
- client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory)
-
- var connUDP udpConn
- var err error
-
- if params.DisableAttemptReconnecting {
- destAddr, err := net.ResolveUDPAddr("udp", params.HostPort)
- if err != nil {
- return nil, err
- }
-
- connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
- if err != nil {
- return nil, err
- }
- } else {
- // host is a hostname; set up a resolver loop in case the host record changes during operation
- connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
- if err != nil {
- return nil, err
- }
- }
-
- if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
- return nil, err
- }
-
- return &AgentClientUDP{
- connUDP: connUDP,
- client: client,
- maxPacketSize: params.MaxPacketSize,
- thriftBuffer: thriftBuffer,
- }, nil
-}
-
-// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
-func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
- return NewAgentClientUDPWithParams(AgentClientUDPParams{
- HostPort: hostPort,
- MaxPacketSize: maxPacketSize,
- })
-}
-
-// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface
-func (a *AgentClientUDP) EmitZipkinBatch(context.Context, []*zipkincore.Span) error {
- return errors.New("Not implemented")
-}
-
-// EmitBatch implements EmitBatch() of Agent interface
-func (a *AgentClientUDP) EmitBatch(ctx context.Context, batch *jaeger.Batch) error {
- a.thriftBuffer.Reset()
- if err := a.client.EmitBatch(ctx, batch); err != nil {
- return err
- }
- if a.thriftBuffer.Len() > a.maxPacketSize {
- return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
- a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
- }
- _, err := a.connUDP.Write(a.thriftBuffer.Bytes())
- return err
-}
-
-// Close implements Close() of io.Closer and closes the underlying UDP connection.
-func (a *AgentClientUDP) Close() error {
- return a.connUDP.Close()
-}
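A usage sketch for the UDP client. The agent address is illustrative, and the Batch type comes from the vendored thrift-gen/jaeger package.

package main

import (
	"context"
	"time"

	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	client, err := utils.NewAgentClientUDPWithParams(utils.AgentClientUDPParams{
		HostPort:                 "127.0.0.1:6831", // illustrative agent address
		AttemptReconnectInterval: 10 * time.Second, // re-resolve the agent host every 10s
	})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// EmitBatch serializes into the shared memory buffer first and rejects
	// batches that would not fit in a single UDP packet.
	batch := &j.Batch{Process: &j.Process{ServiceName: "example-service"}}
	_ = client.EmitBatch(context.Background(), batch)
}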
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
deleted file mode 100644
index ac3c325d1e..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/utils/utils.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "encoding/binary"
- "errors"
- "net"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- // ErrEmptyIP an error for empty ip strings
- ErrEmptyIP = errors.New("empty string given for ip")
-
- // ErrNotHostColonPort an error for invalid host port string
- ErrNotHostColonPort = errors.New("expecting host:port")
-
- // ErrNotFourOctets an error for the wrong number of octets after splitting a string
- ErrNotFourOctets = errors.New("Wrong number of octets")
-)
-
-// ParseIPToUint32 converts a string ip (e.g. "x.y.z.w") to a uint32
-func ParseIPToUint32(ip string) (uint32, error) {
- if ip == "" {
- return 0, ErrEmptyIP
- }
-
- if ip == "localhost" {
- return 127<<24 | 1, nil
- }
-
- octets := strings.Split(ip, ".")
- if len(octets) != 4 {
- return 0, ErrNotFourOctets
- }
-
- var intIP uint32
- for i := 0; i < 4; i++ {
- octet, err := strconv.Atoi(octets[i])
- if err != nil {
- return 0, err
- }
- intIP = (intIP << 8) | uint32(octet)
- }
-
- return intIP, nil
-}
-
-// ParsePort converts a port number from string to uint16.
-func ParsePort(portString string) (uint16, error) {
- port, err := strconv.ParseUint(portString, 10, 16)
- return uint16(port), err
-}
-
-// PackIPAsUint32 packs an IPv4 as uint32
-func PackIPAsUint32(ip net.IP) uint32 {
- if ipv4 := ip.To4(); ipv4 != nil {
- return binary.BigEndian.Uint32(ipv4)
- }
- return 0
-}
-
-// TimeToMicrosecondsSinceEpochInt64 converts Go time.Time to a long
-// representing time since epoch in microseconds, which is the format expected
-// in Jaeger spans encoded as Thrift.
-func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 {
- // ^^^ Passing time.Time by value is faster than passing a pointer!
- // BenchmarkTimeByValue-8 2000000000 1.37 ns/op
- // BenchmarkTimeByPtr-8 2000000000 1.98 ns/op
-
- return t.UnixNano() / 1000
-}
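A worked example of the octet packing in ParseIPToUint32: "10.1.2.3" packs big-endian as (10<<24)|(1<<16)|(2<<8)|3 = 167838211 (0x0A010203).

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	ip, err := utils.ParseIPToUint32("10.1.2.3")
	fmt.Println(ip, err) // 167838211 <nil>

	// "localhost" is special-cased to 127.0.0.1 packed the same way.
	loop, _ := utils.ParseIPToUint32("localhost")
	fmt.Println(loop) // 2130706433 (0x7F000001)

	port, _ := utils.ParsePort("6831")
	fmt.Println(port) // 6831
}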
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
deleted file mode 100644
index 98cab4b6ef..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/zipkin.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "github.com/opentracing/opentracing-go"
-)
-
-// ZipkinSpanFormat is an OpenTracing carrier format constant
-const ZipkinSpanFormat = "zipkin-span-format"
-
-// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
-// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
-type ExtractableZipkinSpan interface {
- TraceID() uint64
- SpanID() uint64
- ParentID() uint64
- Flags() byte
-}
-
-// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
-// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
-type InjectableZipkinSpan interface {
- SetTraceID(traceID uint64)
- SetSpanID(spanID uint64)
- SetParentID(parentID uint64)
- SetFlags(flags byte)
-}
-
-type zipkinPropagator struct {
- tracer *Tracer
-}
-
-func (p *zipkinPropagator) Inject(
- ctx SpanContext,
- abstractCarrier interface{},
-) error {
- carrier, ok := abstractCarrier.(InjectableZipkinSpan)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
- carrier.SetSpanID(uint64(ctx.SpanID()))
- carrier.SetParentID(uint64(ctx.ParentID()))
- carrier.SetFlags(ctx.samplingState.flags())
- return nil
-}
-
-func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
- carrier, ok := abstractCarrier.(ExtractableZipkinSpan)
- if !ok {
- return emptyContext, opentracing.ErrInvalidCarrier
- }
- if carrier.TraceID() == 0 {
- return emptyContext, opentracing.ErrSpanContextNotFound
- }
- var ctx SpanContext
- ctx.traceID.Low = carrier.TraceID()
- ctx.spanID = SpanID(carrier.SpanID())
- ctx.parentID = SpanID(carrier.ParentID())
- ctx.samplingState = &samplingState{}
- ctx.samplingState.setFlags(carrier.Flags())
- return ctx, nil
-}
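// Editor's sketch, not part of the vendored diff: a toy carrier satisfying
// both interfaces above. The Inject call in the trailing comment assumes the
// tracer has this propagator registered under ZipkinSpanFormat.
package demo

type zipkinCarrier struct {
	traceID, spanID, parentID uint64
	flags                     byte
}

func (c *zipkinCarrier) TraceID() uint64       { return c.traceID }
func (c *zipkinCarrier) SpanID() uint64        { return c.spanID }
func (c *zipkinCarrier) ParentID() uint64      { return c.parentID }
func (c *zipkinCarrier) Flags() byte           { return c.flags }
func (c *zipkinCarrier) SetTraceID(id uint64)  { c.traceID = id }
func (c *zipkinCarrier) SetSpanID(id uint64)   { c.spanID = id }
func (c *zipkinCarrier) SetParentID(id uint64) { c.parentID = id }
func (c *zipkinCarrier) SetFlags(f byte)       { c.flags = f }

// carrier := &zipkinCarrier{}
// err := tracer.Inject(span.Context(), jaeger.ZipkinSpanFormat, carrier)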
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
deleted file mode 100644
index 73aeb000f8..0000000000
--- a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
- "encoding/binary"
- "fmt"
- "time"
-
- "github.com/opentracing/opentracing-go/ext"
-
- "github.com/uber/jaeger-client-go/internal/spanlog"
- z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
- "github.com/uber/jaeger-client-go/utils"
-)
-
-const (
- // Zipkin UI does not work well with non-string tag values
- allowPackedNumbers = false
-)
-
-var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){
- string(ext.SpanKind): setSpanKind,
- string(ext.PeerHostIPv4): setPeerIPv4,
- string(ext.PeerPort): setPeerPort,
- string(ext.PeerService): setPeerService,
- TracerIPTagKey: removeTag,
-}
-
-// BuildZipkinThrift builds thrift span based on internal span.
-// TODO: (breaking change) move to transport/zipkin and make private.
-func BuildZipkinThrift(s *Span) *z.Span {
- span := &zipkinSpan{Span: s}
- span.handleSpecialTags()
- parentID := int64(span.context.parentID)
- var ptrParentID *int64
- if parentID != 0 {
- ptrParentID = &parentID
- }
- traceIDHigh := int64(span.context.traceID.High)
- var ptrTraceIDHigh *int64
- if traceIDHigh != 0 {
- ptrTraceIDHigh = &traceIDHigh
- }
- timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
- duration := span.duration.Nanoseconds() / int64(time.Microsecond)
- endpoint := &z.Endpoint{
- ServiceName: span.tracer.serviceName,
- Ipv4: int32(span.tracer.hostIPv4)}
- thriftSpan := &z.Span{
- TraceID: int64(span.context.traceID.Low),
- TraceIDHigh: ptrTraceIDHigh,
- ID: int64(span.context.spanID),
- ParentID: ptrParentID,
- Name: span.operationName,
- Timestamp: &timestamp,
- Duration: &duration,
- Debug: span.context.IsDebug(),
- Annotations: buildAnnotations(span, endpoint),
- BinaryAnnotations: buildBinaryAnnotations(span, endpoint)}
- return thriftSpan
-}
-
-func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation {
- // automatically adding 2 Zipkin CoreAnnotations
- annotations := make([]*z.Annotation, 0, 2+len(span.logs))
- var startLabel, endLabel string
- if span.spanKind == string(ext.SpanKindRPCClientEnum) {
- startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV
- } else if span.spanKind == string(ext.SpanKindRPCServerEnum) {
- startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND
- }
- if !span.startTime.IsZero() && startLabel != "" {
- start := &z.Annotation{
- Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime),
- Value: startLabel,
- Host: endpoint}
- annotations = append(annotations, start)
- if span.duration != 0 {
- endTs := span.startTime.Add(span.duration)
- end := &z.Annotation{
- Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs),
- Value: endLabel,
- Host: endpoint}
- annotations = append(annotations, end)
- }
- }
- for _, log := range span.logs {
- anno := &z.Annotation{
- Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
- Host: endpoint}
- if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil {
- anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength)
- } else {
- anno.Value = err.Error()
- }
- annotations = append(annotations, anno)
- }
- return annotations
-}
-
-func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation {
- // automatically adding local component or server/client address tag, and client version
- annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags))
-
- if span.peerDefined() && span.isRPC() {
- peer := z.Endpoint{
- Ipv4: span.peer.Ipv4,
- Port: span.peer.Port,
- ServiceName: span.peer.ServiceName}
- label := z.CLIENT_ADDR
- if span.isRPCClient() {
- label = z.SERVER_ADDR
- }
- anno := &z.BinaryAnnotation{
- Key: label,
- Value: []byte{1},
- AnnotationType: z.AnnotationType_BOOL,
- Host: &peer}
- annotations = append(annotations, anno)
- }
- if !span.isRPC() {
- componentName := endpoint.ServiceName
- for _, tag := range span.tags {
- if tag.key == string(ext.Component) {
- componentName = stringify(tag.value)
- break
- }
- }
- local := &z.BinaryAnnotation{
- Key: z.LOCAL_COMPONENT,
- Value: []byte(componentName),
- AnnotationType: z.AnnotationType_STRING,
- Host: endpoint}
- annotations = append(annotations, local)
- }
- for _, tag := range span.tags {
- // "Special tags" are already handled by this point, we'd be double reporting the
- // tags if we don't skip here
- if _, ok := specialTagHandlers[tag.key]; ok {
- continue
- }
- if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil {
- annotations = append(annotations, anno)
- }
- }
- return annotations
-}
-
-func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation {
- bann := &z.BinaryAnnotation{Key: key, Host: endpoint}
- if value, ok := val.(string); ok {
- bann.Value = []byte(truncateString(value, maxTagValueLength))
- bann.AnnotationType = z.AnnotationType_STRING
- } else if value, ok := val.([]byte); ok {
- if len(value) > maxTagValueLength {
- value = value[:maxTagValueLength]
- }
- bann.Value = value
- bann.AnnotationType = z.AnnotationType_BYTES
- } else if value, ok := val.(int32); ok && allowPackedNumbers {
- bann.Value = int32ToBytes(value)
- bann.AnnotationType = z.AnnotationType_I32
- } else if value, ok := val.(int64); ok && allowPackedNumbers {
- bann.Value = int64ToBytes(value)
- bann.AnnotationType = z.AnnotationType_I64
- } else if value, ok := val.(int); ok && allowPackedNumbers {
- bann.Value = int64ToBytes(int64(value))
- bann.AnnotationType = z.AnnotationType_I64
- } else if value, ok := val.(bool); ok {
- bann.Value = []byte{boolToByte(value)}
- bann.AnnotationType = z.AnnotationType_BOOL
- } else {
- value := stringify(val)
- bann.Value = []byte(truncateString(value, maxTagValueLength))
- bann.AnnotationType = z.AnnotationType_STRING
- }
- return bann
-}
-
-func stringify(value interface{}) string {
- if s, ok := value.(string); ok {
- return s
- }
- return fmt.Sprintf("%+v", value)
-}
-
-func truncateString(value string, maxLength int) string {
- // we ignore the problem of utf8 runes possibly being sliced in the middle,
- // as it is rather expensive to iterate through each tag just to find rune
- // boundaries.
- if len(value) > maxLength {
- return value[:maxLength]
- }
- return value
-}
-
-func boolToByte(b bool) byte {
- if b {
- return 1
- }
- return 0
-}
-
-// int32ToBytes converts int32 to bytes.
-func int32ToBytes(i int32) []byte {
- buf := make([]byte, 4)
- binary.BigEndian.PutUint32(buf, uint32(i))
- return buf
-}
-
-// int64ToBytes converts int64 to bytes.
-func int64ToBytes(i int64) []byte {
- buf := make([]byte, 8)
- binary.BigEndian.PutUint64(buf, uint64(i))
- return buf
-}
-
-type zipkinSpan struct {
- *Span
-
- // peer points to the peer service participating in this span,
- // e.g. the Client if this span is a server span,
- // or Server if this span is a client span
- peer struct {
- Ipv4 int32
- Port int16
- ServiceName string
- }
-
- // used to distinguish local vs. RPC Server vs. RPC Client spans
- spanKind string
-}
-
-func (s *zipkinSpan) handleSpecialTags() {
- s.Lock()
- defer s.Unlock()
- if s.firstInProcess {
- // append the process tags
- s.tags = append(s.tags, s.tracer.tags...)
- }
- filteredTags := make([]Tag, 0, len(s.tags))
- for _, tag := range s.tags {
- if handler, ok := specialTagHandlers[tag.key]; ok {
- handler(s, tag.value)
- } else {
- filteredTags = append(filteredTags, tag)
- }
- }
- s.tags = filteredTags
-}
-
-func setSpanKind(s *zipkinSpan, value interface{}) {
- if val, ok := value.(string); ok {
- s.spanKind = val
- return
- }
- if val, ok := value.(ext.SpanKindEnum); ok {
- s.spanKind = string(val)
- }
-}
-
-func setPeerIPv4(s *zipkinSpan, value interface{}) {
- if val, ok := value.(string); ok {
- if ip, err := utils.ParseIPToUint32(val); err == nil {
- s.peer.Ipv4 = int32(ip)
- return
- }
- }
- if val, ok := value.(uint32); ok {
- s.peer.Ipv4 = int32(val)
- return
- }
- if val, ok := value.(int32); ok {
- s.peer.Ipv4 = val
- }
-}
-
-func setPeerPort(s *zipkinSpan, value interface{}) {
- if val, ok := value.(string); ok {
- if port, err := utils.ParsePort(val); err == nil {
- s.peer.Port = int16(port)
- return
- }
- }
- if val, ok := value.(uint16); ok {
- s.peer.Port = int16(val)
- return
- }
- if val, ok := value.(int); ok {
- s.peer.Port = int16(val)
- }
-}
-
-func setPeerService(s *zipkinSpan, value interface{}) {
- if val, ok := value.(string); ok {
- s.peer.ServiceName = val
- }
-}
-
-func removeTag(s *zipkinSpan, value interface{}) {}
-
-func (s *zipkinSpan) peerDefined() bool {
- return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0
-}
-
-func (s *zipkinSpan) isRPC() bool {
- s.RLock()
- defer s.RUnlock()
- return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum)
-}
-
-func (s *zipkinSpan) isRPCClient() bool {
- s.RLock()
- defer s.RUnlock()
- return s.spanKind == string(ext.SpanKindRPCClientEnum)
-}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
deleted file mode 100644
index 2a6a43efdb..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/counter.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-// Counter tracks the number of times an event has occurred
-type Counter interface {
- // Inc adds the given value to the counter.
- Inc(int64)
-}
-
-// NullCounter is a counter that does nothing
-var NullCounter Counter = nullCounter{}
-
-type nullCounter struct{}
-
-func (nullCounter) Inc(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
deleted file mode 100644
index 0ead061ebd..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/factory.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "time"
-)
-
-// NSOptions defines the name and tags map associated with a factory namespace
-type NSOptions struct {
- Name string
- Tags map[string]string
-}
-
-// Options defines the information associated with a metric
-type Options struct {
- Name string
- Tags map[string]string
- Help string
-}
-
-// TimerOptions defines the information associated with a metric
-type TimerOptions struct {
- Name string
- Tags map[string]string
- Help string
- Buckets []time.Duration
-}
-
-// HistogramOptions defines the information associated with a metric
-type HistogramOptions struct {
- Name string
- Tags map[string]string
- Help string
- Buckets []float64
-}
-
-// Factory creates new metrics
-type Factory interface {
- Counter(metric Options) Counter
- Timer(metric TimerOptions) Timer
- Gauge(metric Options) Gauge
- Histogram(metric HistogramOptions) Histogram
-
- // Namespace returns a nested metrics factory.
- Namespace(scope NSOptions) Factory
-}
-
-// NullFactory is a metrics factory that returns NullCounter, NullTimer, NullGauge, and NullHistogram.
-var NullFactory Factory = nullFactory{}
-
-type nullFactory struct{}
-
-func (nullFactory) Counter(options Options) Counter {
- return NullCounter
-}
-func (nullFactory) Timer(options TimerOptions) Timer {
- return NullTimer
-}
-func (nullFactory) Gauge(options Options) Gauge {
- return NullGauge
-}
-func (nullFactory) Histogram(options HistogramOptions) Histogram {
- return NullHistogram
-}
-func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory }
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
deleted file mode 100644
index 3c606391a0..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-// Gauge records instantaneous measurements of something as an int64 value
-type Gauge interface {
- // Update the gauge to the value passed in.
- Update(int64)
-}
-
-// NullGauge is a gauge that does nothing
-var NullGauge Gauge = nullGauge{}
-
-type nullGauge struct{}
-
-func (nullGauge) Update(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
deleted file mode 100644
index d3bd6174fe..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-// Histogram keeps track of a distribution of values.
-type Histogram interface {
- // Records the value passed in.
- Record(float64)
-}
-
-// NullHistogram is a histogram that does nothing
-var NullHistogram Histogram = nullHistogram{}
-
-type nullHistogram struct{}
-
-func (nullHistogram) Record(float64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
deleted file mode 100644
index c24445a106..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/keys.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "sort"
-)
-
-// GetKey converts name+tags into a single string of the form
-// "name|tag1=value1|...|tagN=valueN", where tag names are
-// sorted alphabetically.
-func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string {
- keys := make([]string, 0, len(tags))
- for k := range tags {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- key := name
- for _, k := range keys {
- key = key + tagsSep + k + tagKVSep + tags[k]
- }
- return key
-}
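// Editor's sketch, not part of the vendored diff: GetKey flattens name+tags
// deterministically because tag keys are sorted.
package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	key := metrics.GetKey("requests",
		map[string]string{"env": "prod", "zone": "a"}, "|", "=")
	fmt.Println(key) // requests|env=prod|zone=a
}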
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
deleted file mode 100644
index 0df0c662e3..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-// MustInit initializes the fields of the passed-in metrics struct using the passed-in factory.
-//
-// It uses reflection to initialize a struct containing metrics fields
-// by assigning new Counter/Gauge/Timer values with the metric name retrieved
-// from the `metric` tag and stats tags retrieved from the `tags` tag.
-//
-// Note: all fields of the struct must be exported, have a `metric` tag, and be
-// of type Counter or Gauge or Timer.
-//
-// Errors during Init lead to a panic.
-func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) {
- if err := Init(metrics, factory, globalTags); err != nil {
- panic(err.Error())
- }
-}
-
-// Init does the same as MustInit, but returns an error instead of
-// panicking.
-func Init(m interface{}, factory Factory, globalTags map[string]string) error {
- // Allow user to opt out of reporting metrics by passing in nil.
- if factory == nil {
- factory = NullFactory
- }
-
- counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem()
- gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem()
- timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem()
- histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem()
-
- v := reflect.ValueOf(m).Elem()
- t := v.Type()
- for i := 0; i < t.NumField(); i++ {
- tags := make(map[string]string)
- for k, v := range globalTags {
- tags[k] = v
- }
- var buckets []float64
- field := t.Field(i)
- metric := field.Tag.Get("metric")
- if metric == "" {
- return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name)
- }
- if tagString := field.Tag.Get("tags"); tagString != "" {
- tagPairs := strings.Split(tagString, ",")
- for _, tagPair := range tagPairs {
- tag := strings.Split(tagPair, "=")
- if len(tag) != 2 {
- return fmt.Errorf(
- "Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]",
- field.Name, tagPair, tagString)
- }
- tags[tag[0]] = tag[1]
- }
- }
- if bucketString := field.Tag.Get("buckets"); bucketString != "" {
- if field.Type.AssignableTo(timerPtrType) {
- // TODO: Parse timer duration buckets
- return fmt.Errorf(
- "Field [%s]: Buckets are not currently initialized for timer metrics",
- field.Name)
- } else if field.Type.AssignableTo(histogramPtrType) {
- bucketValues := strings.Split(bucketString, ",")
- for _, bucket := range bucketValues {
- b, err := strconv.ParseFloat(bucket, 64)
- if err != nil {
- return fmt.Errorf(
- "Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]",
- field.Name, bucket, bucketString)
- }
- buckets = append(buckets, b)
- }
- } else {
- return fmt.Errorf(
- "Field [%s]: Buckets should only be defined for Timer and Histogram metric types",
- field.Name)
- }
- }
- help := field.Tag.Get("help")
- var obj interface{}
- if field.Type.AssignableTo(counterPtrType) {
- obj = factory.Counter(Options{
- Name: metric,
- Tags: tags,
- Help: help,
- })
- } else if field.Type.AssignableTo(gaugePtrType) {
- obj = factory.Gauge(Options{
- Name: metric,
- Tags: tags,
- Help: help,
- })
- } else if field.Type.AssignableTo(timerPtrType) {
- // TODO: Add buckets once parsed (see TODO above)
- obj = factory.Timer(TimerOptions{
- Name: metric,
- Tags: tags,
- Help: help,
- })
- } else if field.Type.AssignableTo(histogramPtrType) {
- obj = factory.Histogram(HistogramOptions{
- Name: metric,
- Tags: tags,
- Help: help,
- Buckets: buckets,
- })
- } else {
- return fmt.Errorf(
- "Field %s is not a pointer to timer, gauge, or counter",
- field.Name)
- }
- v.Field(i).Set(reflect.ValueOf(obj))
- }
- return nil
-}
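// Editor's sketch, not part of the vendored diff: a metrics struct wired up
// via the struct tags handled by Init above; field names and tag values are
// illustrative.
package main

import "github.com/uber/jaeger-lib/metrics"

type queueMetrics struct {
	Enqueued metrics.Counter   `metric:"enqueued" tags:"queue=inbound" help:"Items enqueued"`
	Depth    metrics.Gauge     `metric:"depth"`
	Latency  metrics.Histogram `metric:"latency" buckets:"0.1,0.5,1"`
}

func main() {
	var m queueMetrics
	metrics.MustInit(&m, metrics.NullFactory, map[string]string{"service": "demo"})
	m.Enqueued.Inc(1)
	m.Depth.Update(42)
	m.Latency.Record(0.25)
}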
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/prometheus/cache.go b/vendor/github.com/uber/jaeger-lib/metrics/prometheus/cache.go
deleted file mode 100644
index 40791ebb70..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/prometheus/cache.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2017 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "strings"
- "sync"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// vectorCache is used to avoid creating Prometheus vectors with the same set of labels more than once.
-type vectorCache struct {
- registerer prometheus.Registerer
- lock sync.Mutex
- cVecs map[string]*prometheus.CounterVec
- gVecs map[string]*prometheus.GaugeVec
- hVecs map[string]*prometheus.HistogramVec
-}
-
-func newVectorCache(registerer prometheus.Registerer) *vectorCache {
- return &vectorCache{
- registerer: registerer,
- cVecs: make(map[string]*prometheus.CounterVec),
- gVecs: make(map[string]*prometheus.GaugeVec),
- hVecs: make(map[string]*prometheus.HistogramVec),
- }
-}
-
-func (c *vectorCache) getOrMakeCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- cacheKey := c.getCacheKey(opts.Name, labelNames)
- cv, cvExists := c.cVecs[cacheKey]
- if !cvExists {
- cv = prometheus.NewCounterVec(opts, labelNames)
- c.registerer.MustRegister(cv)
- c.cVecs[cacheKey] = cv
- }
- return cv
-}
-
-func (c *vectorCache) getOrMakeGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- cacheKey := c.getCacheKey(opts.Name, labelNames)
- gv, gvExists := c.gVecs[cacheKey]
- if !gvExists {
- gv = prometheus.NewGaugeVec(opts, labelNames)
- c.registerer.MustRegister(gv)
- c.gVecs[cacheKey] = gv
- }
- return gv
-}
-
-func (c *vectorCache) getOrMakeHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- cacheKey := c.getCacheKey(opts.Name, labelNames)
- hv, hvExists := c.hVecs[cacheKey]
- if !hvExists {
- hv = prometheus.NewHistogramVec(opts, labelNames)
- c.registerer.MustRegister(hv)
- c.hVecs[cacheKey] = hv
- }
- return hv
-}
-
-func (c *vectorCache) getCacheKey(name string, labels []string) string {
- return strings.Join(append([]string{name}, labels...), "||")
-}
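// Editor's note: the cache key is just the metric name and label names joined
// with "||", so the same name with a different label set yields a distinct
// vector, e.g.
//
//	getCacheKey("requests", []string{"code", "method"}) == "requests||code||method"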
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/prometheus/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/prometheus/factory.go
deleted file mode 100644
index 199fcc7761..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/prometheus/factory.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright (c) 2017 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "sort"
- "strings"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-
- "github.com/uber/jaeger-lib/metrics"
-)
-
-// Factory implements metrics.Factory backed by Prometheus registry.
-type Factory struct {
- scope string
- tags map[string]string
- cache *vectorCache
- buckets []float64
- normalizer *strings.Replacer
- separator Separator
-}
-
-type options struct {
- registerer prometheus.Registerer
- buckets []float64
- separator Separator
-}
-
-// Separator represents the namespace separator to use
-type Separator rune
-
-const (
- // SeparatorUnderscore uses an underscore as separator
- SeparatorUnderscore Separator = '_'
-
- // SeparatorColon uses a colon as separator
- SeparatorColon Separator = ':'
-)
-
-// Option is a function that sets some option for the Factory constructor.
-type Option func(*options)
-
-// WithRegisterer returns an option that sets the registerer.
-// If not used, we fall back to prometheus.DefaultRegisterer.
-func WithRegisterer(registerer prometheus.Registerer) Option {
- return func(opts *options) {
- opts.registerer = registerer
- }
-}
-
-// WithBuckets returns an option that sets the default buckets for histogram.
-// If not used, we fall back to the default Prometheus buckets.
-func WithBuckets(buckets []float64) Option {
- return func(opts *options) {
- opts.buckets = buckets
- }
-}
-
-// WithSeparator returns an option that sets the default separator for the namespace.
-// If not used, we fall back to underscore.
-func WithSeparator(separator Separator) Option {
- return func(opts *options) {
- opts.separator = separator
- }
-}
-
-func applyOptions(opts []Option) *options {
- options := new(options)
- for _, o := range opts {
- o(options)
- }
- if options.registerer == nil {
- options.registerer = prometheus.DefaultRegisterer
- }
- if options.separator == '\x00' {
- options.separator = SeparatorUnderscore
- }
- return options
-}
-
-// New creates a Factory backed by Prometheus registry.
-// Typically the first argument should be prometheus.DefaultRegisterer.
-//
-// Parameter buckets defines the buckets into which Timer observations are counted.
-// Each element in the slice is the upper inclusive bound of a bucket. The
-// values must be sorted in strictly increasing order. There is no need
-// to add a highest bucket with +Inf bound; it will be added
-// implicitly. The default value is prometheus.DefBuckets.
-func New(opts ...Option) *Factory {
- options := applyOptions(opts)
- return newFactory(
- &Factory{ // dummy struct to be discarded
- cache: newVectorCache(options.registerer),
- buckets: options.buckets,
- normalizer: strings.NewReplacer(".", "_", "-", "_"),
- separator: options.separator,
- },
- "", // scope
- nil) // tags
-}
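// Editor's sketch, not part of the vendored diff: constructing the factory
// and a counter; registry, names, and buckets are illustrative.
package main

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/uber/jaeger-lib/metrics"
	jprom "github.com/uber/jaeger-lib/metrics/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	f := jprom.New(jprom.WithRegisterer(reg), jprom.WithBuckets([]float64{0.01, 0.1, 1}))
	c := f.Namespace(metrics.NSOptions{Name: "demo"}).Counter(metrics.Options{
		Name: "requests",
		Tags: map[string]string{"result": "ok"},
	})
	c.Inc(1) // exported as demo_requests_total{result="ok"}
}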
-
-func newFactory(parent *Factory, scope string, tags map[string]string) *Factory {
- return &Factory{
- cache: parent.cache,
- buckets: parent.buckets,
- normalizer: parent.normalizer,
- separator: parent.separator,
- scope: scope,
- tags: tags,
- }
-}
-
-// Counter implements Counter of metrics.Factory.
-func (f *Factory) Counter(options metrics.Options) metrics.Counter {
- help := strings.TrimSpace(options.Help)
- if len(help) == 0 {
- help = options.Name
- }
- name := counterNamingConvention(f.subScope(options.Name))
- tags := f.mergeTags(options.Tags)
- labelNames := f.tagNames(tags)
- opts := prometheus.CounterOpts{
- Name: name,
- Help: help,
- }
- cv := f.cache.getOrMakeCounterVec(opts, labelNames)
- return &counter{
- counter: cv.WithLabelValues(f.tagsAsLabelValues(labelNames, tags)...),
- }
-}
-
-// Gauge implements Gauge of metrics.Factory.
-func (f *Factory) Gauge(options metrics.Options) metrics.Gauge {
- help := strings.TrimSpace(options.Help)
- if len(help) == 0 {
- help = options.Name
- }
- name := f.subScope(options.Name)
- tags := f.mergeTags(options.Tags)
- labelNames := f.tagNames(tags)
- opts := prometheus.GaugeOpts{
- Name: name,
- Help: help,
- }
- gv := f.cache.getOrMakeGaugeVec(opts, labelNames)
- return &gauge{
- gauge: gv.WithLabelValues(f.tagsAsLabelValues(labelNames, tags)...),
- }
-}
-
-// Timer implements Timer of metrics.Factory.
-func (f *Factory) Timer(options metrics.TimerOptions) metrics.Timer {
- help := strings.TrimSpace(options.Help)
- if len(help) == 0 {
- help = options.Name
- }
- name := f.subScope(options.Name)
- buckets := f.selectBuckets(asFloatBuckets(options.Buckets))
- tags := f.mergeTags(options.Tags)
- labelNames := f.tagNames(tags)
- opts := prometheus.HistogramOpts{
- Name: name,
- Help: help,
- Buckets: buckets,
- }
- hv := f.cache.getOrMakeHistogramVec(opts, labelNames)
- return &timer{
- histogram: hv.WithLabelValues(f.tagsAsLabelValues(labelNames, tags)...),
- }
-}
-
-func asFloatBuckets(buckets []time.Duration) []float64 {
- data := make([]float64, len(buckets))
- for i := range data {
- data[i] = float64(buckets[i]) / float64(time.Second)
- }
- return data
-}
-
-// Histogram implements Histogram of metrics.Factory.
-func (f *Factory) Histogram(options metrics.HistogramOptions) metrics.Histogram {
- help := strings.TrimSpace(options.Help)
- if len(help) == 0 {
- help = options.Name
- }
- name := f.subScope(options.Name)
- buckets := f.selectBuckets(options.Buckets)
- tags := f.mergeTags(options.Tags)
- labelNames := f.tagNames(tags)
- opts := prometheus.HistogramOpts{
- Name: name,
- Help: help,
- Buckets: buckets,
- }
- hv := f.cache.getOrMakeHistogramVec(opts, labelNames)
- return &histogram{
- histogram: hv.WithLabelValues(f.tagsAsLabelValues(labelNames, tags)...),
- }
-}
-
-// Namespace implements Namespace of metrics.Factory.
-func (f *Factory) Namespace(scope metrics.NSOptions) metrics.Factory {
- return newFactory(f, f.subScope(scope.Name), f.mergeTags(scope.Tags))
-}
-
-type counter struct {
- counter prometheus.Counter
-}
-
-func (c *counter) Inc(v int64) {
- c.counter.Add(float64(v))
-}
-
-type gauge struct {
- gauge prometheus.Gauge
-}
-
-func (g *gauge) Update(v int64) {
- g.gauge.Set(float64(v))
-}
-
-type observer interface {
- Observe(v float64)
-}
-
-type timer struct {
- histogram observer
-}
-
-func (t *timer) Record(v time.Duration) {
- t.histogram.Observe(float64(v.Nanoseconds()) / float64(time.Second/time.Nanosecond))
-}
-
-type histogram struct {
- histogram observer
-}
-
-func (h *histogram) Record(v float64) {
- h.histogram.Observe(v)
-}
-
-func (f *Factory) subScope(name string) string {
- if f.scope == "" {
- return f.normalize(name)
- }
- if name == "" {
- return f.normalize(f.scope)
- }
- return f.normalize(f.scope + string(f.separator) + name)
-}
-
-func (f *Factory) normalize(v string) string {
- return f.normalizer.Replace(v)
-}
-
-func (f *Factory) mergeTags(tags map[string]string) map[string]string {
- ret := make(map[string]string, len(f.tags)+len(tags))
- for k, v := range f.tags {
- ret[k] = v
- }
- for k, v := range tags {
- ret[k] = v
- }
- return ret
-}
-
-func (f *Factory) tagNames(tags map[string]string) []string {
- ret := make([]string, 0, len(tags))
- for k := range tags {
- ret = append(ret, k)
- }
- sort.Strings(ret)
- return ret
-}
-
-func (f *Factory) tagsAsLabelValues(labels []string, tags map[string]string) []string {
- ret := make([]string, 0, len(tags))
- for _, l := range labels {
- ret = append(ret, tags[l])
- }
- return ret
-}
-
-func (f *Factory) selectBuckets(buckets []float64) []float64 {
- if len(buckets) > 0 {
- return buckets
- }
- return f.buckets
-}
-
-func counterNamingConvention(name string) string {
- if !strings.HasSuffix(name, "_total") {
- name += "_total"
- }
- return name
-}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
deleted file mode 100644
index 4a8abdb539..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "time"
-)
-
-// StartStopwatch begins recording the execution time of an event, returning
-// a Stopwatch that should be used to stop recording the time for
-// that event. Multiple events can occur simultaneously, each
-// represented by a different active Stopwatch.
-func StartStopwatch(timer Timer) Stopwatch {
- return Stopwatch{t: timer, start: time.Now()}
-}
-
-// A Stopwatch tracks the execution time of a specific event
-type Stopwatch struct {
- t Timer
- start time.Time
-}
-
-// Stop stops the stopwatch and records the elapsed time
-func (s Stopwatch) Stop() {
- s.t.Record(s.ElapsedTime())
-}
-
-// ElapsedTime returns the amount of elapsed time (as a time.Duration)
-func (s Stopwatch) ElapsedTime() time.Duration {
- return time.Since(s.start)
-}
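// Editor's sketch, not part of the vendored diff: typical stopwatch usage
// around an operation.
package main

import "github.com/uber/jaeger-lib/metrics"

func timedOperation(timer metrics.Timer) {
	sw := metrics.StartStopwatch(timer)
	defer sw.Stop() // records the elapsed duration on the timer
	// ... do the work being measured ...
}

func main() { timedOperation(metrics.NullTimer) }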
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
deleted file mode 100644
index e18d222abb..0000000000
--- a/vendor/github.com/uber/jaeger-lib/metrics/timer.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metrics
-
-import (
- "time"
-)
-
-// Timer accumulates observations about how long some operation took,
-// and also maintains a histogram of percentiles.
-type Timer interface {
- // Records the time passed in.
- Record(time.Duration)
-}
-
-// NullTimer is a timer that does nothing
-var NullTimer Timer = nullTimer{}
-
-type nullTimer struct{}
-
-func (nullTimer) Record(time.Duration) {}
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/LICENSE
similarity index 100%
rename from vendor/github.com/opentracing-contrib/go-stdlib/LICENSE
rename to vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/LICENSE
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/attributes.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/attributes.go
new file mode 100644
index 0000000000..5a00c7402c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/attributes.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelaws // import "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
+
+import (
+ "context"
+
+ v2Middleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
+ "github.com/aws/smithy-go/middleware"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+)
+
+// AWS attributes.
+const (
+ RegionKey attribute.Key = "aws.region"
+ RequestIDKey attribute.Key = "aws.request_id"
+ AWSSystemVal string = "aws-api"
+)
+
+var servicemap = map[string]AttributeSetter{
+ dynamodb.ServiceID: DynamoDBAttributeSetter,
+ sqs.ServiceID: SQSAttributeSetter,
+}
+
+// SystemAttr returns the AWS RPC system attribute.
+func SystemAttr() attribute.KeyValue {
+ return semconv.RPCSystemKey.String(AWSSystemVal)
+}
+
+// OperationAttr returns the AWS operation attribute.
+func OperationAttr(operation string) attribute.KeyValue {
+ return semconv.RPCMethod(operation)
+}
+
+// RegionAttr returns the AWS region attribute.
+func RegionAttr(region string) attribute.KeyValue {
+ return RegionKey.String(region)
+}
+
+// ServiceAttr returns the AWS service attribute.
+func ServiceAttr(service string) attribute.KeyValue {
+ return semconv.RPCService(service)
+}
+
+// RequestIDAttr returns the AWS request ID attribute.
+func RequestIDAttr(requestID string) attribute.KeyValue {
+ return RequestIDKey.String(requestID)
+}
+
+// DefaultAttributeSetter checks whether service-specific attributes are
+// available for the AWS service and, if so, includes them.
+func DefaultAttributeSetter(ctx context.Context, in middleware.InitializeInput) []attribute.KeyValue {
+ serviceID := v2Middleware.GetServiceID(ctx)
+
+ if fn, ok := servicemap[serviceID]; ok {
+ return fn(ctx, in)
+ }
+
+ return []attribute.KeyValue{}
+}
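// Editor's sketch, not part of the vendored diff: a custom AttributeSetter
// layered on top of the default one; the "team" attribute is an illustrative
// assumption. Wiring happens via WithAttributeSetter (config.go below) when
// calling AppendMiddlewares (aws.go below).
package demo

import (
	"context"

	"github.com/aws/smithy-go/middleware"

	"go.opentelemetry.io/otel/attribute"
)

// teamAttributeSetter tags every AWS span with the owning team.
func teamAttributeSetter(ctx context.Context, in middleware.InitializeInput) []attribute.KeyValue {
	return []attribute.KeyValue{attribute.String("team", "storage")}
}

// otelaws.AppendMiddlewares(&cfg.APIOptions,
//	otelaws.WithAttributeSetter(otelaws.DefaultAttributeSetter, teamAttributeSetter))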
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/aws.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/aws.go
new file mode 100644
index 0000000000..5709db82cf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/aws.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelaws // import "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
+
+import (
+ "context"
+ "time"
+
+ v2Middleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ // ScopeName is the instrumentation scope name.
+ ScopeName = "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
+)
+
+type spanTimestampKey struct{}
+
+// AttributeSetter returns a slice of KeyValue pairs; it can be used to set custom attributes.
+type AttributeSetter func(context.Context, middleware.InitializeInput) []attribute.KeyValue
+
+type otelMiddlewares struct {
+ tracer trace.Tracer
+ propagator propagation.TextMapPropagator
+ attributeSetter []AttributeSetter
+}
+
+func (m otelMiddlewares) initializeMiddlewareBefore(stack *middleware.Stack) error {
+ return stack.Initialize.Add(middleware.InitializeMiddlewareFunc("OTelInitializeMiddlewareBefore", func(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ ctx = context.WithValue(ctx, spanTimestampKey{}, time.Now())
+ return next.HandleInitialize(ctx, in)
+ }),
+ middleware.Before)
+}
+
+func (m otelMiddlewares) initializeMiddlewareAfter(stack *middleware.Stack) error {
+ return stack.Initialize.Add(middleware.InitializeMiddlewareFunc("OTelInitializeMiddlewareAfter", func(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ serviceID := v2Middleware.GetServiceID(ctx)
+ operation := v2Middleware.GetOperationName(ctx)
+ region := v2Middleware.GetRegion(ctx)
+
+ attributes := []attribute.KeyValue{
+ SystemAttr(),
+ ServiceAttr(serviceID),
+ RegionAttr(region),
+ OperationAttr(operation),
+ }
+ for _, setter := range m.attributeSetter {
+ attributes = append(attributes, setter(ctx, in)...)
+ }
+
+ ctx, span := m.tracer.Start(ctx, spanName(serviceID, operation),
+ trace.WithTimestamp(ctx.Value(spanTimestampKey{}).(time.Time)),
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attributes...),
+ )
+ defer span.End()
+
+ out, metadata, err = next.HandleInitialize(ctx, in)
+ if err != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ }
+
+ return out, metadata, err
+ }),
+ middleware.After)
+}
+
+func (m otelMiddlewares) deserializeMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(middleware.DeserializeMiddlewareFunc("OTelDeserializeMiddleware", func(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ span := trace.SpanFromContext(ctx)
+ span.SetAttributes(semconv.HTTPStatusCode(resp.StatusCode))
+
+ requestID, ok := v2Middleware.GetRequestIDMetadata(metadata)
+ if ok {
+ span.SetAttributes(RequestIDAttr(requestID))
+ }
+
+ return out, metadata, err
+ }),
+ middleware.Before)
+}
+
+func (m otelMiddlewares) finalizeMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Add(middleware.FinalizeMiddlewareFunc("OTelFinalizeMiddleware", func(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+ ) {
+ // Propagate the Trace information by injecting it into the HTTP request.
+ switch req := in.Request.(type) {
+ case *smithyhttp.Request:
+ m.propagator.Inject(ctx, propagation.HeaderCarrier(req.Header))
+ default:
+ }
+
+ return next.HandleFinalize(ctx, in)
+ }),
+ middleware.Before)
+}
+
+func spanName(serviceID, operation string) string {
+ spanName := serviceID
+ if operation != "" {
+ spanName += "." + operation
+ }
+ return spanName
+}
+
+// AppendMiddlewares attaches OTel middlewares to the AWS Go SDK v2 for instrumentation.
+// OTel middlewares can be appended either to all AWS clients or to a specific operation.
+// See https://aws.github.io/aws-sdk-go-v2/docs/middleware/ for details.
+func AppendMiddlewares(apiOptions *[]func(*middleware.Stack) error, opts ...Option) {
+ cfg := config{
+ TracerProvider: otel.GetTracerProvider(),
+ TextMapPropagator: otel.GetTextMapPropagator(),
+ }
+ for _, opt := range opts {
+ opt.apply(&cfg)
+ }
+
+ if cfg.AttributeSetter == nil {
+ cfg.AttributeSetter = []AttributeSetter{DefaultAttributeSetter}
+ }
+
+ m := otelMiddlewares{
+ tracer: cfg.TracerProvider.Tracer(ScopeName,
+ trace.WithInstrumentationVersion(Version())),
+ propagator: cfg.TextMapPropagator,
+ attributeSetter: cfg.AttributeSetter,
+ }
+ *apiOptions = append(*apiOptions, m.initializeMiddlewareBefore, m.initializeMiddlewareAfter, m.finalizeMiddleware, m.deserializeMiddleware)
+}
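// Editor's sketch, not part of the vendored diff: typical wiring at client
// construction time; the DynamoDB client is an illustrative choice.
package main

import (
	"context"

	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"

	"go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
)

func main() {
	ctx := context.Background()
	cfg, err := awsconfig.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	otelaws.AppendMiddlewares(&cfg.APIOptions) // every AWS call now emits a client span
	client := dynamodb.NewFromConfig(cfg)
	_ = client
}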
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/config.go
new file mode 100644
index 0000000000..57be19d20f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/config.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelaws // import "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
+
+import (
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type config struct {
+ TracerProvider trace.TracerProvider
+ TextMapPropagator propagation.TextMapPropagator
+ AttributeSetter []AttributeSetter
+}
+
+// Option applies an option value.
+type Option interface {
+ apply(*config)
+}
+
+// optionFunc provides a convenience wrapper for simple Options
+// that can be represented as functions.
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+ o(c)
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global TracerProvider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.TracerProvider = provider
+ }
+ })
+}
+
+// WithTextMapPropagator specifies a Text Map Propagator to use when propagating context.
+// If none is specified, the global TextMapPropagator is used.
+func WithTextMapPropagator(propagator propagation.TextMapPropagator) Option {
+ return optionFunc(func(cfg *config) {
+ if propagator != nil {
+ cfg.TextMapPropagator = propagator
+ }
+ })
+}
+
+// WithAttributeSetter specifies an attribute setter function for setting service specific attributes.
+// If none is specified, the service will be determined by the DefaultAttributeSetter function and the corresponding attributes will be included.
+func WithAttributeSetter(attributesetters ...AttributeSetter) Option {
+ return optionFunc(func(cfg *config) {
+ cfg.AttributeSetter = append(cfg.AttributeSetter, attributesetters...)
+ })
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/dynamodbattributes.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/dynamodbattributes.go
new file mode 100644
index 0000000000..81d66fe61f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/dynamodbattributes.go
@@ -0,0 +1,183 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelaws // import "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/smithy-go/middleware"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+)
+
+// DynamoDBAttributeSetter sets DynamoDB specific attributes depending on the DynamoDB operation being performed.
+func DynamoDBAttributeSetter(ctx context.Context, in middleware.InitializeInput) []attribute.KeyValue {
+ dynamodbAttributes := []attribute.KeyValue{semconv.DBSystemDynamoDB}
+
+ switch v := in.Parameters.(type) {
+ case *dynamodb.GetItemInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ if v.ConsistentRead != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBConsistentRead(*v.ConsistentRead))
+ }
+
+ if v.ProjectionExpression != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBProjection(*v.ProjectionExpression))
+ }
+
+ case *dynamodb.BatchGetItemInput:
+ var tableNames []string
+ for k := range v.RequestItems {
+ tableNames = append(tableNames, k)
+ }
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(tableNames...))
+
+ case *dynamodb.BatchWriteItemInput:
+ var tableNames []string
+ for k := range v.RequestItems {
+ tableNames = append(tableNames, k)
+ }
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(tableNames...))
+
+ case *dynamodb.CreateTableInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ if v.GlobalSecondaryIndexes != nil {
+ var idx []string
+ for _, gsi := range v.GlobalSecondaryIndexes {
+ i, _ := json.Marshal(gsi)
+ idx = append(idx, string(i))
+ }
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBGlobalSecondaryIndexes(idx...))
+ }
+
+ if v.LocalSecondaryIndexes != nil {
+ var idx []string
+ for _, lsi := range v.LocalSecondaryIndexes {
+ i, _ := json.Marshal(lsi)
+ idx = append(idx, string(i))
+ }
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBLocalSecondaryIndexes(idx...))
+ }
+
+ if v.ProvisionedThroughput != nil {
+ read := float64(*v.ProvisionedThroughput.ReadCapacityUnits)
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBProvisionedReadCapacity(read))
+ write := float64(*v.ProvisionedThroughput.WriteCapacityUnits)
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBProvisionedWriteCapacity(write))
+ }
+
+ case *dynamodb.DeleteItemInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ case *dynamodb.DeleteTableInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ case *dynamodb.DescribeTableInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ case *dynamodb.ListTablesInput:
+ if v.ExclusiveStartTableName != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBExclusiveStartTable(*v.ExclusiveStartTableName))
+ }
+
+ if v.Limit != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBLimit(int(*v.Limit)))
+ }
+
+ case *dynamodb.PutItemInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ case *dynamodb.QueryInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ if v.ConsistentRead != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBConsistentRead(*v.ConsistentRead))
+ }
+
+ if v.IndexName != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBIndexName(*v.IndexName))
+ }
+
+ if v.Limit != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBLimit(int(*v.Limit)))
+ }
+
+ if v.ScanIndexForward != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBScanForward(*v.ScanIndexForward))
+ }
+
+ if v.ProjectionExpression != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBProjection(*v.ProjectionExpression))
+ }
+
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBSelect(string(v.Select)))
+
+ case *dynamodb.ScanInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ if v.ConsistentRead != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBConsistentRead(*v.ConsistentRead))
+ }
+
+ if v.IndexName != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBIndexName(*v.IndexName))
+ }
+
+ if v.Limit != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBLimit(int(*v.Limit)))
+ }
+
+ if v.ProjectionExpression != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBProjection(*v.ProjectionExpression))
+ }
+
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBSelect(string(v.Select)))
+
+ if v.Segment != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBSegment(int(*v.Segment)))
+ }
+
+ if v.TotalSegments != nil {
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTotalSegments(int(*v.TotalSegments)))
+ }
+
+ case *dynamodb.UpdateItemInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ case *dynamodb.UpdateTableInput:
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBTableNames(*v.TableName))
+
+ if v.AttributeDefinitions != nil {
+ var def []string
+ for _, ad := range v.AttributeDefinitions {
+ d, _ := json.Marshal(ad)
+ def = append(def, string(d))
+ }
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBAttributeDefinitions(def...))
+ }
+
+ if v.GlobalSecondaryIndexUpdates != nil {
+ var idx []string
+ for _, gsiu := range v.GlobalSecondaryIndexUpdates {
+ i, _ := json.Marshal(gsiu)
+ idx = append(idx, string(i))
+ }
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBGlobalSecondaryIndexUpdates(idx...))
+ }
+
+ if v.ProvisionedThroughput != nil {
+ read := float64(*v.ProvisionedThroughput.ReadCapacityUnits)
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBProvisionedReadCapacity(read))
+ write := float64(*v.ProvisionedThroughput.WriteCapacityUnits)
+ dynamodbAttributes = append(dynamodbAttributes, semconv.AWSDynamoDBProvisionedWriteCapacity(write))
+ }
+ }
+
+ return dynamodbAttributes
+}
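// Editor's sketch, not part of the vendored diff: the attributes produced for
// a GetItem call; table name and flags are illustrative.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/smithy-go/middleware"

	"go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
)

func main() {
	in := middleware.InitializeInput{Parameters: &dynamodb.GetItemInput{
		TableName:      aws.String("users"),
		ConsistentRead: aws.Bool(true),
	}}
	attrs := otelaws.DynamoDBAttributeSetter(context.Background(), in)
	// Includes db.system, aws.dynamodb.table_names, and aws.dynamodb.consistent_read.
	fmt.Println(attrs)
}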
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/sqsattributes.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/sqsattributes.go
new file mode 100644
index 0000000000..cdc064da94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/sqsattributes.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelaws // import "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
+ "github.com/aws/smithy-go/middleware"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+)
+
+// SQSAttributeSetter sets SQS specific attributes depending on the SQS operation being performed.
+func SQSAttributeSetter(ctx context.Context, in middleware.InitializeInput) []attribute.KeyValue {
+ sqsAttributes := []attribute.KeyValue{semconv.MessagingSystem("AmazonSQS")}
+
+ key := semconv.NetPeerNameKey
+ switch v := in.Parameters.(type) {
+ case *sqs.DeleteMessageBatchInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.DeleteMessageInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.DeleteQueueInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.GetQueueAttributesInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.ListDeadLetterSourceQueuesInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.ListQueueTagsInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.PurgeQueueInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.ReceiveMessageInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.RemovePermissionInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.SendMessageBatchInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.SendMessageInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.SetQueueAttributesInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.TagQueueInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ case *sqs.UntagQueueInput:
+ sqsAttributes = append(sqsAttributes, key.String(*v.QueueUrl))
+ }
+
+ return sqsAttributes
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/version.go
new file mode 100644
index 0000000000..1767c1cc41
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/version.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelaws // import "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws"
+
+// Version is the current release version of the AWS SDKv2 instrumentation.
+func Version() string {
+ return "0.52.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/client_golang/LICENSE
rename to vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/LICENSE
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/config.go
new file mode 100644
index 0000000000..5b14a269d0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/config.go
@@ -0,0 +1,68 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelmongo // import "go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo"
+
+import (
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// ScopeName is the instrumentation scope name.
+const ScopeName = "go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo"
+
+// config is used to configure the mongo tracer.
+type config struct {
+ TracerProvider trace.TracerProvider
+
+ Tracer trace.Tracer
+
+ CommandAttributeDisabled bool
+}
+
+// newConfig returns a config with all Options set.
+func newConfig(opts ...Option) config {
+ cfg := config{
+ TracerProvider: otel.GetTracerProvider(),
+ CommandAttributeDisabled: true,
+ }
+ for _, opt := range opts {
+ opt.apply(&cfg)
+ }
+
+ cfg.Tracer = cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+ return cfg
+}
+
+// Option specifies instrumentation configuration options.
+type Option interface {
+ apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+ o(c)
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.TracerProvider = provider
+ }
+ })
+}
+
+// WithCommandAttributeDisabled specifies whether the MongoDB command is added as an attribute to spans.
+// This is disabled by default: the MongoDB command will not be added as an
+// attribute to spans unless this option is provided with disabled set to false.
+func WithCommandAttributeDisabled(disabled bool) Option {
+ return optionFunc(func(cfg *config) {
+ cfg.CommandAttributeDisabled = disabled
+ })
+}
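Since the default above leaves CommandAttributeDisabled set to true, recording the command as `db.statement` is opt-in; a short sketch (imports elided):

```go
// Opt in to recording the raw command (not yet sanitized; see the TODO in mongo.go).
mon := otelmongo.NewMonitor(otelmongo.WithCommandAttributeDisabled(false))
_ = mon
```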
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/doc.go
new file mode 100644
index 0000000000..61be6c45dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/doc.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelmongo instruments go.mongodb.org/mongo-driver/mongo.
+//
+// This package is compatible with v0.2.0 of
+// go.mongodb.org/mongo-driver/mongo.
+//
+// `NewMonitor` will return an event.CommandMonitor which is used to trace
+// requests.
+//
+// This code was originally based on the following:
+// - https://github.com/DataDog/dd-trace-go/tree/02f0449efa3cb382d499fadc873957385dcb2192/contrib/go.mongodb.org/mongo-driver/mongo
+// - https://github.com/DataDog/dd-trace-go/tree/v1.23.3/ddtrace/ext
+package otelmongo // import "go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo"
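A minimal sketch of attaching the monitor to a driver client, assuming a local MongoDB instance:

```go
package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"

	"go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo"
)

func main() {
	ctx := context.Background()
	// Every command issued by this client is traced via the CommandMonitor.
	opts := options.Client().
		ApplyURI("mongodb://localhost:27017"). // assumed local instance
		SetMonitor(otelmongo.NewMonitor())
	client, err := mongo.Connect(ctx, opts)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Disconnect(ctx) }()
}
```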
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/mongo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/mongo.go
new file mode 100644
index 0000000000..4fdd270536
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/mongo.go
@@ -0,0 +1,149 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelmongo // import "go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo"
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/event"
+)
+
+type spanKey struct {
+ ConnectionID string
+ RequestID int64
+}
+
+type monitor struct {
+ sync.Mutex
+ spans map[spanKey]trace.Span
+ cfg config
+}
+
+func (m *monitor) Started(ctx context.Context, evt *event.CommandStartedEvent) {
+ var spanName string
+
+ hostname, port := peerInfo(evt)
+
+ attrs := []attribute.KeyValue{
+ semconv.DBSystemMongoDB,
+ semconv.DBOperation(evt.CommandName),
+ semconv.DBName(evt.DatabaseName),
+ semconv.NetPeerName(hostname),
+ semconv.NetPeerPort(port),
+ semconv.NetTransportTCP,
+ }
+ if !m.cfg.CommandAttributeDisabled {
+ attrs = append(attrs, semconv.DBStatement(sanitizeCommand(evt.Command)))
+ }
+ if collection, err := extractCollection(evt); err == nil && collection != "" {
+ spanName = collection + "."
+ attrs = append(attrs, semconv.DBMongoDBCollection(collection))
+ }
+ spanName += evt.CommandName
+ opts := []trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attrs...),
+ }
+ _, span := m.cfg.Tracer.Start(ctx, spanName, opts...)
+ key := spanKey{
+ ConnectionID: evt.ConnectionID,
+ RequestID: evt.RequestID,
+ }
+ m.Lock()
+ m.spans[key] = span
+ m.Unlock()
+}
+
+func (m *monitor) Succeeded(ctx context.Context, evt *event.CommandSucceededEvent) {
+ m.Finished(&evt.CommandFinishedEvent, nil)
+}
+
+func (m *monitor) Failed(ctx context.Context, evt *event.CommandFailedEvent) {
+ m.Finished(&evt.CommandFinishedEvent, fmt.Errorf("%s", evt.Failure))
+}
+
+func (m *monitor) Finished(evt *event.CommandFinishedEvent, err error) {
+ key := spanKey{
+ ConnectionID: evt.ConnectionID,
+ RequestID: evt.RequestID,
+ }
+ m.Lock()
+ span, ok := m.spans[key]
+ if ok {
+ delete(m.spans, key)
+ }
+ m.Unlock()
+ if !ok {
+ return
+ }
+
+ if err != nil {
+ span.SetStatus(codes.Error, err.Error())
+ }
+
+ span.End()
+}
+
+// TODO sanitize values where possible, then re-enable the `db.statement` span attribute by default.
+// TODO limit maximum size.
+func sanitizeCommand(command bson.Raw) string {
+ b, _ := bson.MarshalExtJSON(command, false, false)
+ return string(b)
+}
+
+// extractCollection extracts the collection for the given mongodb command event.
+// For CRUD operations, this is the first key/value string pair in the bson
+// document where key == "<operation>" (e.g. key == "insert").
+// For database meta-level operations, such a key may not exist.
+func extractCollection(evt *event.CommandStartedEvent) (string, error) {
+ elt, err := evt.Command.IndexErr(0)
+ if err != nil {
+ return "", err
+ }
+ if key, err := elt.KeyErr(); err == nil && key == evt.CommandName {
+ var v bson.RawValue
+ if v, err = elt.ValueErr(); err != nil || v.Type != bson.TypeString {
+ return "", err
+ }
+ return v.StringValue(), nil
+ }
+ return "", fmt.Errorf("collection name not found")
+}
+
+// NewMonitor creates a new mongodb event CommandMonitor.
+func NewMonitor(opts ...Option) *event.CommandMonitor {
+ cfg := newConfig(opts...)
+ m := &monitor{
+ spans: make(map[spanKey]trace.Span),
+ cfg: cfg,
+ }
+ return &event.CommandMonitor{
+ Started: m.Started,
+ Succeeded: m.Succeeded,
+ Failed: m.Failed,
+ }
+}
+
+func peerInfo(evt *event.CommandStartedEvent) (hostname string, port int) {
+ hostname = evt.ConnectionID
+ port = 27017
+ if idx := strings.IndexByte(hostname, '['); idx >= 0 {
+ hostname = hostname[:idx]
+ }
+ if idx := strings.IndexByte(hostname, ':'); idx >= 0 {
+ port, _ = strconv.Atoi(hostname[idx+1:]) // error deliberately ignored; a malformed port yields 0, matching the old behavior
+ hostname = hostname[:idx]
+ }
+ return hostname, port
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/version.go
new file mode 100644
index 0000000000..8a9c0f4639
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo/version.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelmongo // import "go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo"
+
+// Version is the current release version of the mongo-driver instrumentation.
+func Version() string {
+ return "0.52.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/client_model/LICENSE
rename to vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
new file mode 100644
index 0000000000..a199b36b4f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -0,0 +1,259 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "google.golang.org/grpc/stats"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+ "go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ // ScopeName is the instrumentation scope name.
+ ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ // GRPCStatusCodeKey is convention for numeric status code of a gRPC request.
+ GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+// InterceptorFilter is a predicate used to determine whether a given request,
+// described by the interceptor info, should be instrumented. An InterceptorFilter
+// must return true if the request should be traced.
+//
+// Deprecated: Use stats handlers instead.
+type InterceptorFilter func(*InterceptorInfo) bool
+
+// Filter is a predicate used to determine whether a given request, described
+// by the attached RPC tag info, should be instrumented.
+// A Filter must return true if the request should be instrumented.
+type Filter func(*stats.RPCTagInfo) bool
+
+// config is a group of options for this instrumentation.
+type config struct {
+ Filter Filter
+ InterceptorFilter InterceptorFilter
+ Propagators propagation.TextMapPropagator
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+ SpanStartOptions []trace.SpanStartOption
+
+ ReceivedEvent bool
+ SentEvent bool
+
+ tracer trace.Tracer
+ meter metric.Meter
+
+ rpcDuration metric.Float64Histogram
+ rpcRequestSize metric.Int64Histogram
+ rpcResponseSize metric.Int64Histogram
+ rpcRequestsPerRPC metric.Int64Histogram
+ rpcResponsesPerRPC metric.Int64Histogram
+}
+
+// Option applies an option value for a config.
+type Option interface {
+ apply(*config)
+}
+
+// newConfig returns a config configured with all the passed Options.
+func newConfig(opts []Option, role string) *config {
+ c := &config{
+ Propagators: otel.GetTextMapPropagator(),
+ TracerProvider: otel.GetTracerProvider(),
+ MeterProvider: otel.GetMeterProvider(),
+ }
+ for _, o := range opts {
+ o.apply(c)
+ }
+
+ c.tracer = c.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(SemVersion()),
+ )
+
+ c.meter = c.MeterProvider.Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ var err error
+ c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration",
+ metric.WithDescription("Measures the duration of inbound RPC."),
+ metric.WithUnit("ms"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcDuration == nil {
+ c.rpcDuration = noop.Float64Histogram{}
+ }
+ }
+
+ c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size",
+ metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
+ metric.WithUnit("By"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcRequestSize == nil {
+ c.rpcRequestSize = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size",
+ metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
+ metric.WithUnit("By"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcResponseSize == nil {
+ c.rpcResponseSize = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
+ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+ metric.WithUnit("{count}"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcRequestsPerRPC == nil {
+ c.rpcRequestsPerRPC = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
+ metric.WithDescription("Measures the number of messages sent per RPC. Should be 1 for all non-streaming RPCs."),
+ metric.WithUnit("{count}"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcResponsesPerRPC == nil {
+ c.rpcResponsesPerRPC = noop.Int64Histogram{}
+ }
+ }
+
+ return c
+}
+
+type propagatorsOption struct{ p propagation.TextMapPropagator }
+
+func (o propagatorsOption) apply(c *config) {
+ if o.p != nil {
+ c.Propagators = o.p
+ }
+}
+
+// WithPropagators returns an Option to use the Propagators when extracting
+// and injecting trace context from requests.
+func WithPropagators(p propagation.TextMapPropagator) Option {
+ return propagatorsOption{p: p}
+}
+
+type tracerProviderOption struct{ tp trace.TracerProvider }
+
+func (o tracerProviderOption) apply(c *config) {
+ if o.tp != nil {
+ c.TracerProvider = o.tp
+ }
+}
+
+// WithInterceptorFilter returns an Option to use the request filter.
+//
+// Deprecated: Use stats handlers instead.
+func WithInterceptorFilter(f InterceptorFilter) Option {
+ return interceptorFilterOption{f: f}
+}
+
+type interceptorFilterOption struct {
+ f InterceptorFilter
+}
+
+func (o interceptorFilterOption) apply(c *config) {
+ if o.f != nil {
+ c.InterceptorFilter = o.f
+ }
+}
+
+// WithFilter returns an Option to use the request filter.
+func WithFilter(f Filter) Option {
+ return filterOption{f: f}
+}
+
+type filterOption struct {
+ f Filter
+}
+
+func (o filterOption) apply(c *config) {
+ if o.f != nil {
+ c.Filter = o.f
+ }
+}
+
+// WithTracerProvider returns an Option to use the TracerProvider when
+// creating a Tracer.
+func WithTracerProvider(tp trace.TracerProvider) Option {
+ return tracerProviderOption{tp: tp}
+}
+
+type meterProviderOption struct{ mp metric.MeterProvider }
+
+func (o meterProviderOption) apply(c *config) {
+ if o.mp != nil {
+ c.MeterProvider = o.mp
+ }
+}
+
+// WithMeterProvider returns an Option to use the MeterProvider when
+// creating a Meter. If this option is not provided, the global MeterProvider will be used.
+func WithMeterProvider(mp metric.MeterProvider) Option {
+ return meterProviderOption{mp: mp}
+}
+
+// Event type that can be recorded, see WithMessageEvents.
+type Event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+ ReceivedEvents Event = iota
+ SentEvents
+)
+
+type messageEventsProviderOption struct {
+ events []Event
+}
+
+func (m messageEventsProviderOption) apply(c *config) {
+ for _, e := range m.events {
+ switch e {
+ case ReceivedEvents:
+ c.ReceivedEvent = true
+ case SentEvents:
+ c.SentEvent = true
+ }
+ }
+}
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+// - ReceivedEvents: Record the number of bytes read after every gRPC read operation.
+// - SentEvents: Record the number of bytes written after every gRPC write operation.
+func WithMessageEvents(events ...Event) Option {
+ return messageEventsProviderOption{events: events}
+}
+
+type spanStartOption struct{ opts []trace.SpanStartOption }
+
+func (o spanStartOption) apply(c *config) {
+ c.SpanStartOptions = append(c.SpanStartOptions, o.opts...)
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+ return spanStartOption{opts}
+}
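A sketch combining these options on a server-side stats handler; the health-check filter is illustrative:

```go
package grpcexample

import (
	"google.golang.org/grpc/stats"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

// newStatsHandler skips health checks and records an event per message in both directions.
func newStatsHandler() stats.Handler {
	return otelgrpc.NewServerHandler(
		otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
			return info.FullMethodName != "/grpc.health.v1.Health/Check"
		}),
		otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents),
	)
}
```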
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
new file mode 100644
index 0000000000..b8b836b00f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
+
+Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client.
+
+Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server.
+*/
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
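A sketch of both wirings; the target address and insecure credentials are illustrative:

```go
package grpcexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

func instrumented() (*grpc.Server, *grpc.ClientConn, error) {
	// Server side: pass the handler to grpc.NewServer.
	srv := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))
	// Client side: pass the handler when creating the client connection.
	conn, err := grpc.NewClient("localhost:50051", // illustrative target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
	return srv, conn, err
}
```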
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
new file mode 100644
index 0000000000..7f19058e4c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
@@ -0,0 +1,529 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+// gRPC tracing middleware
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
+import (
+ "context"
+ "io"
+ "net"
+ "strconv"
+ "time"
+
+ "google.golang.org/grpc"
+ grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type messageType attribute.KeyValue
+
+// Event adds an event of the messageType to the span associated with the
+// passed context with a message id.
+func (m messageType) Event(ctx context.Context, id int, _ interface{}) {
+ span := trace.SpanFromContext(ctx)
+ if !span.IsRecording() {
+ return
+ }
+ span.AddEvent("message", trace.WithAttributes(
+ attribute.KeyValue(m),
+ RPCMessageIDKey.Int(id),
+ ))
+}
+
+var (
+ messageSent = messageType(RPCMessageTypeSent)
+ messageReceived = messageType(RPCMessageTypeReceived)
+)
+
+// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
+// for use in a grpc.NewClient call.
+//
+// Deprecated: Use [NewClientHandler] instead.
+func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
+ cfg := newConfig(opts, "client")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ method string,
+ req, reply interface{},
+ cc *grpc.ClientConn,
+ invoker grpc.UnaryInvoker,
+ callOpts ...grpc.CallOption,
+ ) error {
+ i := &InterceptorInfo{
+ Method: method,
+ Type: UnaryClient,
+ }
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
+ return invoker(ctx, method, req, reply, cc, callOpts...)
+ }
+
+ name, attr, _ := telemetryAttributes(method, cc.Target())
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ ctx,
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ ctx = inject(ctx, cfg.Propagators)
+
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, req)
+ }
+
+ err := invoker(ctx, method, req, reply, cc, callOpts...)
+
+ if cfg.ReceivedEvent {
+ messageReceived.Event(ctx, 1, reply)
+ }
+
+ if err != nil {
+ s, _ := status.FromError(err)
+ span.SetStatus(codes.Error, s.Message())
+ span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ return err
+ }
+}
+
+// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
+// SendMsg method call.
+type clientStream struct {
+ grpc.ClientStream
+ desc *grpc.StreamDesc
+
+ span trace.Span
+
+ receivedEvent bool
+ sentEvent bool
+
+ receivedMessageID int
+ sentMessageID int
+}
+
+var _ = proto.Marshal
+
+func (w *clientStream) RecvMsg(m interface{}) error {
+ err := w.ClientStream.RecvMsg(m)
+
+ if err == nil && !w.desc.ServerStreams {
+ w.endSpan(nil)
+ } else if err == io.EOF {
+ w.endSpan(nil)
+ } else if err != nil {
+ w.endSpan(err)
+ } else {
+ w.receivedMessageID++
+
+ if w.receivedEvent {
+ messageReceived.Event(w.Context(), w.receivedMessageID, m)
+ }
+ }
+
+ return err
+}
+
+func (w *clientStream) SendMsg(m interface{}) error {
+ err := w.ClientStream.SendMsg(m)
+
+ w.sentMessageID++
+
+ if w.sentEvent {
+ messageSent.Event(w.Context(), w.sentMessageID, m)
+ }
+
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return err
+}
+
+func (w *clientStream) Header() (metadata.MD, error) {
+ md, err := w.ClientStream.Header()
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return md, err
+}
+
+func (w *clientStream) CloseSend() error {
+ err := w.ClientStream.CloseSend()
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return err
+}
+
+func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
+ return &clientStream{
+ ClientStream: s,
+ span: span,
+ desc: desc,
+ receivedEvent: cfg.ReceivedEvent,
+ sentEvent: cfg.SentEvent,
+ }
+}
+
+func (w *clientStream) endSpan(err error) {
+ if err != nil {
+ s, _ := status.FromError(err)
+ w.span.SetStatus(codes.Error, s.Message())
+ w.span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ w.span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ w.span.End()
+}
+
+// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
+// for use in a grpc.NewClient call.
+//
+// Deprecated: Use [NewClientHandler] instead.
+func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
+ cfg := newConfig(opts, "client")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ desc *grpc.StreamDesc,
+ cc *grpc.ClientConn,
+ method string,
+ streamer grpc.Streamer,
+ callOpts ...grpc.CallOption,
+ ) (grpc.ClientStream, error) {
+ i := &InterceptorInfo{
+ Method: method,
+ Type: StreamClient,
+ }
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
+ return streamer(ctx, desc, cc, method, callOpts...)
+ }
+
+ name, attr, _ := telemetryAttributes(method, cc.Target())
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ ctx,
+ name,
+ startOpts...,
+ )
+
+ ctx = inject(ctx, cfg.Propagators)
+
+ s, err := streamer(ctx, desc, cc, method, callOpts...)
+ if err != nil {
+ grpcStatus, _ := status.FromError(err)
+ span.SetStatus(codes.Error, grpcStatus.Message())
+ span.SetAttributes(statusCodeAttr(grpcStatus.Code()))
+ span.End()
+ return s, err
+ }
+ stream := wrapClientStream(s, desc, span, cfg)
+ return stream, nil
+ }
+}
+
+// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
+// for use in a grpc.NewServer call.
+//
+// Deprecated: Use [NewServerHandler] instead.
+func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
+ cfg := newConfig(opts, "server")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ req interface{},
+ info *grpc.UnaryServerInfo,
+ handler grpc.UnaryHandler,
+ ) (interface{}, error) {
+ i := &InterceptorInfo{
+ UnaryServerInfo: info,
+ Type: UnaryServer,
+ }
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
+ return handler(ctx, req)
+ }
+
+ ctx = extract(ctx, cfg.Propagators)
+ name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ if cfg.ReceivedEvent {
+ messageReceived.Event(ctx, 1, req)
+ }
+
+ before := time.Now()
+
+ resp, err := handler(ctx, req)
+
+ s, _ := status.FromError(err)
+ if err != nil {
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, s.Proto())
+ }
+ } else {
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, resp)
+ }
+ }
+ grpcStatusCodeAttr := statusCodeAttr(s.Code())
+ span.SetAttributes(grpcStatusCodeAttr)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(before)) / float64(time.Millisecond)
+
+ metricAttrs = append(metricAttrs, grpcStatusCodeAttr)
+ cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...))
+
+ return resp, err
+ }
+}
+
+// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and
+// SendMsg method call.
+type serverStream struct {
+ grpc.ServerStream
+ ctx context.Context
+
+ receivedMessageID int
+ sentMessageID int
+
+ receivedEvent bool
+ sentEvent bool
+}
+
+func (w *serverStream) Context() context.Context {
+ return w.ctx
+}
+
+func (w *serverStream) RecvMsg(m interface{}) error {
+ err := w.ServerStream.RecvMsg(m)
+
+ if err == nil {
+ w.receivedMessageID++
+ if w.receivedEvent {
+ messageReceived.Event(w.Context(), w.receivedMessageID, m)
+ }
+ }
+
+ return err
+}
+
+func (w *serverStream) SendMsg(m interface{}) error {
+ err := w.ServerStream.SendMsg(m)
+
+ w.sentMessageID++
+ if w.sentEvent {
+ messageSent.Event(w.Context(), w.sentMessageID, m)
+ }
+
+ return err
+}
+
+func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream {
+ return &serverStream{
+ ServerStream: ss,
+ ctx: ctx,
+ receivedEvent: cfg.ReceivedEvent,
+ sentEvent: cfg.SentEvent,
+ }
+}
+
+// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
+// for use in a grpc.NewServer call.
+//
+// Deprecated: Use [NewServerHandler] instead.
+func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
+ cfg := newConfig(opts, "server")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ srv interface{},
+ ss grpc.ServerStream,
+ info *grpc.StreamServerInfo,
+ handler grpc.StreamHandler,
+ ) error {
+ ctx := ss.Context()
+ i := &InterceptorInfo{
+ StreamServerInfo: info,
+ Type: StreamServer,
+ }
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
+ return handler(srv, wrapServerStream(ctx, ss, cfg))
+ }
+
+ ctx = extract(ctx, cfg.Propagators)
+ name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ err := handler(srv, wrapServerStream(ctx, ss, cfg))
+ if err != nil {
+ s, _ := status.FromError(err)
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ return err
+ }
+}
+
+// telemetryAttributes returns a span name and span and metric attributes from
+// the gRPC method and peer address.
+func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) {
+ name, methodAttrs := internal.ParseFullMethod(fullMethod)
+ peerAttrs := peerAttr(peerAddress)
+
+ attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs))
+ attrs = append(attrs, RPCSystemGRPC)
+ attrs = append(attrs, methodAttrs...)
+ metricAttrs := attrs[:1+len(methodAttrs)]
+ attrs = append(attrs, peerAttrs...)
+ return name, attrs, metricAttrs
+}
+
+// peerAttr returns attributes about the peer address.
+func peerAttr(addr string) []attribute.KeyValue {
+ host, p, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil
+ }
+
+ if host == "" {
+ host = "127.0.0.1"
+ }
+ port, err := strconv.Atoi(p)
+ if err != nil {
+ return nil
+ }
+
+ var attr []attribute.KeyValue
+ if ip := net.ParseIP(host); ip != nil {
+ attr = []attribute.KeyValue{
+ semconv.NetSockPeerAddr(host),
+ semconv.NetSockPeerPort(port),
+ }
+ } else {
+ attr = []attribute.KeyValue{
+ semconv.NetPeerName(host),
+ semconv.NetPeerPort(port),
+ }
+ }
+
+ return attr
+}
+
+// peerFromCtx returns a peer address from a context, if one exists.
+func peerFromCtx(ctx context.Context) string {
+ p, ok := peer.FromContext(ctx)
+ if !ok {
+ return ""
+ }
+ return p.Addr.String()
+}
+
+// statusCodeAttr returns status code attribute based on given gRPC code.
+func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue {
+ return GRPCStatusCodeKey.Int64(int64(c))
+}
+
+// serverStatus returns a span status code and message for a given gRPC
+// status code. It maps specific gRPC status codes to a corresponding span
+// status code and message. This function is intended for use on the server
+// side of a gRPC connection.
+//
+// If the gRPC status code is Unknown, DeadlineExceeded, Unimplemented,
+// Internal, Unavailable, or DataLoss, it returns a span status code of Error
+// and the message from the gRPC status. Otherwise, it returns a span status
+// code of Unset and an empty message.
+func serverStatus(grpcStatus *status.Status) (codes.Code, string) {
+ switch grpcStatus.Code() {
+ case grpc_codes.Unknown,
+ grpc_codes.DeadlineExceeded,
+ grpc_codes.Unimplemented,
+ grpc_codes.Internal,
+ grpc_codes.Unavailable,
+ grpc_codes.DataLoss:
+ return codes.Error, grpcStatus.Message()
+ default:
+ return codes.Unset, ""
+ }
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
new file mode 100644
index 0000000000..b62f7cd7c4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "google.golang.org/grpc"
+)
+
+// InterceptorType is the flag to define which gRPC interceptor
+// the InterceptorInfo object is.
+type InterceptorType uint8
+
+const (
+ // UndefinedInterceptor is the type for the interceptor information that is not
+ // well initialized or categorized to other types.
+ UndefinedInterceptor InterceptorType = iota
+ // UnaryClient is the type for grpc.UnaryClient interceptor.
+ UnaryClient
+ // StreamClient is the type for grpc.StreamClient interceptor.
+ StreamClient
+ // UnaryServer is the type for grpc.UnaryServer interceptor.
+ UnaryServer
+ // StreamServer is the type for grpc.StreamServer interceptor.
+ StreamServer
+)
+
+// InterceptorInfo is the union of some arguments to four types of
+// gRPC interceptors.
+type InterceptorInfo struct {
+ // Method is method name registered to UnaryClient and StreamClient
+ Method string
+ // UnaryServerInfo is the metadata for UnaryServer
+ UnaryServerInfo *grpc.UnaryServerInfo
+ // StreamServerInfo is the metadata for StreamServer
+ StreamServerInfo *grpc.StreamServerInfo
+ // Type is the type for interceptor
+ Type InterceptorType
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
new file mode 100644
index 0000000000..bef07b7a3c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+
+import (
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// ParseFullMethod returns a span name following the OpenTelemetry semantic
+// conventions as well as all applicable span attribute.KeyValue attributes based
+// on a gRPC's FullMethod.
+//
+// Parsing is consistent with grpc-go implementation:
+// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39
+func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) {
+ if !strings.HasPrefix(fullMethod, "/") {
+ // Invalid format, does not follow `/package.service/method`.
+ return fullMethod, nil
+ }
+ name := fullMethod[1:]
+ pos := strings.LastIndex(name, "/")
+ if pos < 0 {
+ // Invalid format, does not follow `/package.service/method`.
+ return name, nil
+ }
+ service, method := name[:pos], name[pos+1:]
+
+ var attrs []attribute.KeyValue
+ if service != "" {
+ attrs = append(attrs, semconv.RPCService(service))
+ }
+ if method != "" {
+ attrs = append(attrs, semconv.RPCMethod(method))
+ }
+ return name, attrs
+}
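For intuition, what the parser yields for a well-formed full method name (this package is internal to the module, so the call is illustrative only):

```go
// name  == "grpc.health.v1.Health/Check"
// attrs == [rpc.service("grpc.health.v1.Health"), rpc.method("Check")]
name, attrs := internal.ParseFullMethod("/grpc.health.v1.Health/Check")
```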
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
new file mode 100644
index 0000000000..3aa37915df
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
@@ -0,0 +1,87 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+
+ "go.opentelemetry.io/otel/baggage"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type metadataSupplier struct {
+ metadata *metadata.MD
+}
+
+// assert that metadataSupplier implements the TextMapCarrier interface.
+var _ propagation.TextMapCarrier = &metadataSupplier{}
+
+func (s *metadataSupplier) Get(key string) string {
+ values := s.metadata.Get(key)
+ if len(values) == 0 {
+ return ""
+ }
+ return values[0]
+}
+
+func (s *metadataSupplier) Set(key string, value string) {
+ s.metadata.Set(key, value)
+}
+
+func (s *metadataSupplier) Keys() []string {
+ out := make([]string, 0, len(*s.metadata))
+ for key := range *s.metadata {
+ out = append(out, key)
+ }
+ return out
+}
+
+// Inject injects correlation context and span context into the gRPC
+// metadata object. This function is meant to be used on outgoing
+// requests.
+// Deprecated: Unnecessary public func.
+func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
+ c := newConfig(opts, "")
+ c.Propagators.Inject(ctx, &metadataSupplier{
+ metadata: md,
+ })
+}
+
+func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+ propagators.Inject(ctx, &metadataSupplier{
+ metadata: &md,
+ })
+ return metadata.NewOutgoingContext(ctx, md)
+}
+
+// Extract returns the correlation context and span context that
+// another service encoded in the gRPC metadata object with Inject.
+// This function is meant to be used on incoming requests.
+// Deprecated: Unnecessary public func.
+func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
+ c := newConfig(opts, "")
+ ctx = c.Propagators.Extract(ctx, &metadataSupplier{
+ metadata: md,
+ })
+
+ return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx)
+}
+
+func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+
+ return propagators.Extract(ctx, &metadataSupplier{
+ metadata: &md,
+ })
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
new file mode 100644
index 0000000000..409c621b74
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// Semantic conventions for attribute keys for gRPC.
+const (
+ // Name of message transmitted or received.
+ RPCNameKey = attribute.Key("name")
+
+ // Type of message transmitted or received.
+ RPCMessageTypeKey = attribute.Key("message.type")
+
+ // Identifier of message transmitted or received.
+ RPCMessageIDKey = attribute.Key("message.id")
+
+ // The compressed size of the message transmitted or received in bytes.
+ RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // The uncompressed size of the message transmitted or received in
+ // bytes.
+ RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+// Semantic conventions for common RPC attributes.
+var (
+ // Semantic convention for gRPC as the remoting system.
+ RPCSystemGRPC = semconv.RPCSystemGRPC
+
+ // Semantic convention for a message named message.
+ RPCNameMessage = RPCNameKey.String("message")
+
+ // Semantic conventions for RPC message types.
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
new file mode 100644
index 0000000000..fad58733fe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
@@ -0,0 +1,219 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type gRPCContextKey struct{}
+
+type gRPCContext struct {
+ messagesReceived int64
+ messagesSent int64
+ metricAttrs []attribute.KeyValue
+ record bool
+}
+
+type serverHandler struct {
+ *config
+}
+
+// NewServerHandler creates a stats.Handler for a gRPC server.
+func NewServerHandler(opts ...Option) stats.Handler {
+ h := &serverHandler{
+ config: newConfig(opts, "server"),
+ }
+
+ return h
+}
+
+// TagConn can attach some information to the given context.
+func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn processes the Conn stats.
+func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
+}
+
+// TagRPC can attach some information to the given context.
+func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ ctx = extract(ctx, h.config.Propagators)
+
+ name, attrs := internal.ParseFullMethod(info.FullMethodName)
+ attrs = append(attrs, RPCSystemGRPC)
+ ctx, _ = h.tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(attrs...),
+ )
+
+ gctx := gRPCContext{
+ metricAttrs: attrs,
+ record: true,
+ }
+ if h.config.Filter != nil {
+ gctx.record = h.config.Filter(info)
+ }
+ return context.WithValue(ctx, gRPCContextKey{}, &gctx)
+}
+
+// HandleRPC processes the RPC stats.
+func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ isServer := true
+ h.handleRPC(ctx, rs, isServer)
+}
+
+type clientHandler struct {
+ *config
+}
+
+// NewClientHandler creates a stats.Handler for a gRPC client.
+func NewClientHandler(opts ...Option) stats.Handler {
+ h := &clientHandler{
+ config: newConfig(opts, "client"),
+ }
+
+ return h
+}
+
+// TagRPC can attach some information to the given context.
+func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ name, attrs := internal.ParseFullMethod(info.FullMethodName)
+ attrs = append(attrs, RPCSystemGRPC)
+ ctx, _ = h.tracer.Start(
+ ctx,
+ name,
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attrs...),
+ )
+
+ gctx := gRPCContext{
+ metricAttrs: attrs,
+ record: true,
+ }
+ if h.config.Filter != nil {
+ gctx.record = h.config.Filter(info)
+ }
+
+ return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
+}
+
+// HandleRPC processes the RPC stats.
+func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ isServer := false
+ h.handleRPC(ctx, rs, isServer)
+}
+
+// TagConn can attach some information to the given context.
+func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn processes the Conn stats.
+func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
+ // no-op
+}
+
+func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool) { // nolint: revive // isServer is not a control flag.
+ span := trace.SpanFromContext(ctx)
+ var metricAttrs []attribute.KeyValue
+ var messageId int64
+
+ gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
+ if gctx != nil {
+ if !gctx.record {
+ return
+ }
+ metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
+ metricAttrs = append(metricAttrs, gctx.metricAttrs...)
+ }
+
+ switch rs := rs.(type) {
+ case *stats.Begin:
+ case *stats.InPayload:
+ if gctx != nil {
+ messageId = atomic.AddInt64(&gctx.messagesReceived, 1)
+ c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
+ }
+
+ if c.ReceivedEvent {
+ span.AddEvent("message",
+ trace.WithAttributes(
+ semconv.MessageTypeReceived,
+ semconv.MessageIDKey.Int64(messageId),
+ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
+ semconv.MessageUncompressedSizeKey.Int(rs.Length),
+ ),
+ )
+ }
+ case *stats.OutPayload:
+ if gctx != nil {
+ messageId = atomic.AddInt64(&gctx.messagesSent, 1)
+ c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
+ }
+
+ if c.SentEvent {
+ span.AddEvent("message",
+ trace.WithAttributes(
+ semconv.MessageTypeSent,
+ semconv.MessageIDKey.Int64(messageId),
+ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
+ semconv.MessageUncompressedSizeKey.Int(rs.Length),
+ ),
+ )
+ }
+ case *stats.OutTrailer:
+ case *stats.OutHeader:
+ if p, ok := peer.FromContext(ctx); ok {
+ span.SetAttributes(peerAttr(p.Addr.String())...)
+ }
+ case *stats.End:
+ var rpcStatusAttr attribute.KeyValue
+
+ if rs.Error != nil {
+ s, _ := status.FromError(rs.Error)
+ if isServer {
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ } else {
+ span.SetStatus(codes.Error, s.Message())
+ }
+ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
+ } else {
+ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK))
+ }
+ span.SetAttributes(rpcStatusAttr)
+ span.End()
+
+ metricAttrs = append(metricAttrs, rpcStatusAttr)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond)
+
+ c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...))
+ if gctx != nil {
+ c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...))
+ c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...))
+ }
+ default:
+ return
+ }
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
new file mode 100644
index 0000000000..3f9cfda541
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+// Version is the current release version of the gRPC instrumentation.
+func Version() string {
+ return "0.52.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/common/LICENSE
rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
new file mode 100644
index 0000000000..deea149645
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
+// Please be careful of initialization order - for example, if you change
+// the global propagator, the DefaultClient might still be using the old one.
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
+
+// Get is a convenient replacement for http.Get that adds a span around the request.
+func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ return DefaultClient.Do(req)
+}
+
+// Head is a convenient replacement for http.Head that adds a span around the request.
+func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ return DefaultClient.Do(req)
+}
+
+// Post is a convenient replacement for http.Post that adds a span around the request.
+func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", contentType)
+ return DefaultClient.Do(req)
+}
+
+// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
+func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
+ return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
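A sketch of a traced request through one of these helpers; the URL is illustrative and the span is exported via the globally registered TracerProvider:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// DefaultClient's instrumented Transport creates the span around the request.
	resp, err := otelhttp.Get(context.Background(), "https://example.com/") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```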
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
new file mode 100644
index 0000000000..214acaf581
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "net/http"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Attribute keys that can be added to a span.
+const (
+ ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read
+ ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded)
+ WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
+ WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
+)
+
+// Server HTTP metrics.
+const (
+ serverRequestSize = "http.server.request.size" // Incoming request bytes total
+ serverResponseSize = "http.server.response.size" // Incoming response bytes total
+ serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
+)
+
+// Client HTTP metrics.
+const (
+ clientRequestSize = "http.client.request.size" // Outgoing request bytes total
+ clientResponseSize = "http.client.response.size" // Outgoing response bytes total
+ clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds
+)
+
+// Filter is a predicate used to determine whether a given http.Request should
+// be traced. A Filter must return true if the request should be traced.
+type Filter func(*http.Request) bool
+
+func newTracer(tp trace.TracerProvider) trace.Tracer {
+ return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version()))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
new file mode 100644
index 0000000000..c1015a9ecc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -0,0 +1,196 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptrace"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// ScopeName is the instrumentation scope name.
+const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// config represents the configuration options available for the http.Handler
+// and http.Transport types.
+type config struct {
+ ServerName string
+ Tracer trace.Tracer
+ Meter metric.Meter
+ Propagators propagation.TextMapPropagator
+ SpanStartOptions []trace.SpanStartOption
+ PublicEndpoint bool
+ PublicEndpointFn func(*http.Request) bool
+ ReadEvent bool
+ WriteEvent bool
+ Filters []Filter
+ SpanNameFormatter func(string, *http.Request) string
+ ClientTrace func(context.Context) *httptrace.ClientTrace
+
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+}
+
+// Option interface used for setting optional config properties.
+type Option interface {
+ apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+ o(c)
+}
+
+// newConfig creates a new config struct and applies opts to it.
+func newConfig(opts ...Option) *config {
+ c := &config{
+ Propagators: otel.GetTextMapPropagator(),
+ MeterProvider: otel.GetMeterProvider(),
+ }
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+
+ // Tracer is only initialized if manually specified. Otherwise, it can be passed with the tracing context.
+ if c.TracerProvider != nil {
+ c.Tracer = newTracer(c.TracerProvider)
+ }
+
+ c.Meter = c.MeterProvider.Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(Version()),
+ )
+
+ return c
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.TracerProvider = provider
+ }
+ })
+}
+
+// WithMeterProvider specifies a meter provider to use for creating a meter.
+// If none is specified, the global provider is used.
+func WithMeterProvider(provider metric.MeterProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.MeterProvider = provider
+ }
+ })
+}
+
+// WithPublicEndpoint configures the Handler to link the span with an incoming
+// span context. If this option is not provided, then the association is a child
+// association instead of a link.
+func WithPublicEndpoint() Option {
+ return optionFunc(func(c *config) {
+ c.PublicEndpoint = true
+ })
+}
+
+// WithPublicEndpointFn runs with every request, and allows conditionally
+// configuring the Handler to link the span with an incoming span context. If
+// this option is not provided or returns false, then the association is a
+// child association instead of a link.
+// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
+func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
+ return optionFunc(func(c *config) {
+ c.PublicEndpointFn = fn
+ })
+}
+
+// WithPropagators configures specific propagators. If this
+// option isn't specified, then the global TextMapPropagator is used.
+func WithPropagators(ps propagation.TextMapPropagator) Option {
+ return optionFunc(func(c *config) {
+ if ps != nil {
+ c.Propagators = ps
+ }
+ })
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+ return optionFunc(func(c *config) {
+ c.SpanStartOptions = append(c.SpanStartOptions, opts...)
+ })
+}
+
+// WithFilter adds a filter to the list of filters used by the handler.
+// If any filter indicates to exclude a request then the request will not be
+// traced. All filters must allow a request to be traced for a Span to be created.
+// If no filters are provided then all requests are traced.
+// Filters will be invoked for each processed request; it is advised to make them
+// simple and fast.
+func WithFilter(f Filter) Option {
+ return optionFunc(func(c *config) {
+ c.Filters = append(c.Filters, f)
+ })
+}
+
+type event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+ ReadEvents event = iota
+ WriteEvents
+)
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
+// using the ReadBytesKey
+//   - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
+// using the WriteBytesKey
+func WithMessageEvents(events ...event) Option {
+ return optionFunc(func(c *config) {
+ for _, e := range events {
+ switch e {
+ case ReadEvents:
+ c.ReadEvent = true
+ case WriteEvents:
+ c.WriteEvent = true
+ }
+ }
+ })
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request and the returned string will become the Span Name.
+func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
+ return optionFunc(func(c *config) {
+ c.SpanNameFormatter = f
+ })
+}
+
+// WithClientTrace takes a function that returns a client trace instance that
+// will be applied to the requests sent through the otelhttp Transport.
+func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
+ return optionFunc(func(c *config) {
+ c.ClientTrace = f
+ })
+}
+
+// WithServerName returns an Option that sets the name of the (virtual) server
+// handling requests.
+func WithServerName(server string) Option {
+ return optionFunc(func(c *config) {
+ c.ServerName = server
+ })
+}
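
The options above compose as plain variadic arguments. A minimal usage sketch, not part of the vendored file; the mux, paths, and server name are hypothetical, while all option names come from the code above:

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})

	wrapped := otelhttp.NewHandler(mux, "server",
		// Hypothetical virtual host name; recorded on every span.
		otelhttp.WithServerName("api.example.com"),
		// Record read/write message events on each span.
		otelhttp.WithMessageEvents(otelhttp.ReadEvents, otelhttp.WriteEvents),
		// Skip tracing for health checks (hypothetical path).
		otelhttp.WithFilter(func(r *http.Request) bool {
			return r.URL.Path != "/healthz"
		}),
	)
	_ = http.ListenAndServe(":8080", wrapped)
}
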
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
new file mode 100644
index 0000000000..56b24b982a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelhttp provides an http.Handler and functions that are intended
+// to be used to add tracing by wrapping existing handlers (with NewHandler)
+// and routes (with WithRouteTag).
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
new file mode 100644
index 0000000000..c64f8beca7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -0,0 +1,251 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/felixge/httpsnoop"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// middleware is an http middleware which wraps the next handler in a span.
+type middleware struct {
+ operation string
+ server string
+
+ tracer trace.Tracer
+ meter metric.Meter
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ readEvent bool
+ writeEvent bool
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ publicEndpoint bool
+ publicEndpointFn func(*http.Request) bool
+
+ traceSemconv semconv.HTTPServer
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ serverLatencyMeasure metric.Float64Histogram
+}
+
+func defaultHandlerFormatter(operation string, _ *http.Request) string {
+ return operation
+}
+
+// NewHandler wraps the passed handler in a span named after the operation and
+// enriches it with metrics.
+func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
+ return NewMiddleware(operation, opts...)(handler)
+}
+
+// NewMiddleware returns a tracing and metrics instrumentation middleware.
+// The handler returned by the middleware wraps a handler
+// in a span named after the operation and enriches it with metrics.
+func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
+ h := middleware{
+ operation: operation,
+
+ traceSemconv: semconv.NewHTTPServer(),
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
+ WithSpanNameFormatter(defaultHandlerFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ h.configure(c)
+ h.createMeasures()
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.serveHTTP(w, r, next)
+ })
+ }
+}
+
+func (h *middleware) configure(c *config) {
+ h.tracer = c.Tracer
+ h.meter = c.Meter
+ h.propagators = c.Propagators
+ h.spanStartOptions = c.SpanStartOptions
+ h.readEvent = c.ReadEvent
+ h.writeEvent = c.WriteEvent
+ h.filters = c.Filters
+ h.spanNameFormatter = c.SpanNameFormatter
+ h.publicEndpoint = c.PublicEndpoint
+ h.publicEndpointFn = c.PublicEndpointFn
+ h.server = c.ServerName
+}
+
+func handleErr(err error) {
+ if err != nil {
+ otel.Handle(err)
+ }
+}
+
+func (h *middleware) createMeasures() {
+ var err error
+ h.requestBytesCounter, err = h.meter.Int64Counter(
+ serverRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ h.responseBytesCounter, err = h.meter.Int64Counter(
+ serverResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ h.serverLatencyMeasure, err = h.meter.Float64Histogram(
+ serverDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of inbound HTTP requests."),
+ )
+ handleErr(err)
+}
+
+// serveHTTP sets up tracing and calls the given next http.Handler with the span
+// context injected into the request context.
+func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
+ requestStartTime := time.Now()
+ for _, f := range h.filters {
+ if !f(r) {
+ // Simply pass through to the handler if a filter rejects the request
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
+ opts := []trace.SpanStartOption{
+ trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...),
+ }
+
+ opts = append(opts, h.spanStartOptions...)
+ if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
+ opts = append(opts, trace.WithNewRoot())
+ // Linking incoming span context if any for public endpoint.
+ if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
+ opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
+ }
+ }
+
+ tracer := h.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
+ defer span.End()
+
+ readRecordFunc := func(int64) {}
+ if h.readEvent {
+ readRecordFunc = func(n int64) {
+ span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
+ }
+ }
+
+ var bw bodyWrapper
+	// If the request body is nil or NoBody, don't wrap it: doing so would
+	// change the body's identity in observable ways, because callers may
+	// assert that the ReadCloser is exactly nil or NoBody.
+ if r.Body != nil && r.Body != http.NoBody {
+ bw.ReadCloser = r.Body
+ bw.record = readRecordFunc
+ r.Body = &bw
+ }
+
+ writeRecordFunc := func(int64) {}
+ if h.writeEvent {
+ writeRecordFunc = func(n int64) {
+ span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
+ }
+ }
+
+ rww := &respWriterWrapper{
+ ResponseWriter: w,
+ record: writeRecordFunc,
+ ctx: ctx,
+ props: h.propagators,
+ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
+ }
+
+ // Wrap w to use our ResponseWriter methods while also exposing
+ // other interfaces that w may implement (http.CloseNotifier,
+ // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
+
+ w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
+ return rww.Header
+ },
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return rww.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return rww.WriteHeader
+ },
+ })
+
+ labeler := &Labeler{}
+ ctx = injectLabeler(ctx, labeler)
+
+ next.ServeHTTP(w, r.WithContext(ctx))
+
+ span.SetStatus(semconv.ServerStatus(rww.statusCode))
+ span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
+ StatusCode: rww.statusCode,
+ ReadBytes: bw.read.Load(),
+ ReadError: bw.err,
+ WriteBytes: rww.written,
+ WriteError: rww.err,
+ })...)
+
+ // Add metrics
+ attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
+ if rww.statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
+ }
+ o := metric.WithAttributes(attributes...)
+ h.requestBytesCounter.Add(ctx, bw.read.Load(), o)
+ h.responseBytesCounter.Add(ctx, rww.written, o)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
+}
+
+// WithRouteTag annotates spans and metrics with the provided route name
+// using the HTTP route attribute.
+func WithRouteTag(route string, h http.Handler) http.Handler {
+ attr := semconv.NewHTTPServer().Route(route)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ span := trace.SpanFromContext(r.Context())
+ span.SetAttributes(attr)
+
+ labeler, _ := LabelerFromContext(r.Context())
+ labeler.Add(attr)
+
+ h.ServeHTTP(w, r)
+ })
+}
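
A short sketch of how WithRouteTag above is meant to be used together with NewHandler; the route pattern and handler body are hypothetical:

package example

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func newRouter() http.Handler {
	mux := http.NewServeMux()
	// Tag all /users/ requests with a stable route instead of the raw path.
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/{id}",
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNoContent)
		})))
	// NewHandler starts the server span; WithRouteTag then annotates it.
	return otelhttp.NewHandler(mux, "server")
}
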
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
new file mode 100644
index 0000000000..9be3feef29
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "fmt"
+ "net/http"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+)
+
+type ResponseTelemetry struct {
+ StatusCode int
+ ReadBytes int64
+ ReadError error
+ WriteBytes int64
+ WriteError error
+}
+
+type HTTPServer interface {
+ // RequestTraceAttrs returns trace attributes for an HTTP request received by a
+ // server.
+ //
+ // The server must be the primary server name if it is known. For example this
+ // would be the ServerName directive
+ // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+ // server, and the server_name directive
+ // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+ // nginx server. More generically, the primary server name would be the host
+ // header value that matches the default virtual host of an HTTP server. It
+ // should include the host identifier and if a port is used to route to the
+ // server that port identifier should be included as an appropriate port
+ // suffix.
+ //
+ // If the primary server name is not known, server should be an empty string.
+ // The req Host will be used to determine the server instead.
+ RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue
+
+ // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+ //
+	// If any of the fields in the ResponseTelemetry are not set, the corresponding attribute is omitted.
+ ResponseTraceAttrs(ResponseTelemetry) []attribute.KeyValue
+
+ // Route returns the attribute for the route.
+ Route(string) attribute.KeyValue
+}
+
+// var warnOnce = sync.Once{}
+
+func NewHTTPServer() HTTPServer {
+ // TODO (#5331): Detect version based on environment variable OTEL_HTTP_CLIENT_COMPATIBILITY_MODE.
+ // TODO (#5331): Add warning of use of a deprecated version of Semantic Versions.
+ return oldHTTPServer{}
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func ServerStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
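
The ServerStatus mapping above treats only 5xx and out-of-range codes as span errors; 4xx is left Unset because a client error is not a server failure. A standalone restatement for illustration (the real function lives in this internal package and cannot be imported directly):

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

// serverStatus mirrors the ServerStatus mapping above, for illustration only.
func serverStatus(code int) (codes.Code, string) {
	switch {
	case code < 100 || code >= 600:
		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
	case code >= 500:
		return codes.Error, ""
	default:
		return codes.Unset, "" // 1xx-4xx: not a server error
	}
}
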
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
new file mode 100644
index 0000000000..c92076bc3d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "net"
+ "strconv"
+ "strings"
+)
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p)
+}
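
A test-style sketch of the splitHostPort contract above, assuming it sits in the same package; the expected values follow directly from the code:

package semconv

import "testing"

func TestSplitHostPortSketch(t *testing.T) {
	cases := []struct {
		in   string
		host string
		port int
	}{
		{"example.com", "example.com", -1}, // no port -> -1
		{"example.com:8080", "example.com", 8080},
		{"[::1]", "::1", -1}, // bracketed IPv6, no port
		{"[::1]:443", "::1", 443},
		{":8080", "", 8080}, // no host -> ""
	}
	for _, c := range cases {
		h, p := splitHostPort(c.in)
		if h != c.host || p != c.port {
			t.Errorf("splitHostPort(%q) = (%q, %d), want (%q, %d)",
				c.in, h, p, c.host, c.port)
		}
	}
}
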
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
new file mode 100644
index 0000000000..d753083b7b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "io"
+ "net/http"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+type oldHTTPServer struct{}
+
+var _ HTTPServer = oldHTTPServer{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+ return semconvutil.HTTPServerRequest(server, req)
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set, the corresponding attribute is omitted.
+func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ attributes := []attribute.KeyValue{}
+
+ if resp.ReadBytes > 0 {
+ attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes)))
+ }
+ if resp.ReadError != nil && resp.ReadError != io.EOF {
+ // This is not in the semantic conventions, but is historically provided
+ attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error()))
+ }
+ if resp.WriteBytes > 0 {
+ attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes)))
+ }
+ if resp.StatusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode))
+ }
+ if resp.WriteError != nil && resp.WriteError != io.EOF {
+ // This is not in the semantic conventions, but is historically provided
+ attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error()))
+ }
+
+ return attributes
+}
+
+// Route returns the attribute for the route.
+func (o oldHTTPServer) Route(route string) attribute.KeyValue {
+ return semconv.HTTPRoute(route)
+}
+
+// HTTPStatusCode returns the attribute for the HTTP status code.
+// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added.
+func HTTPStatusCode(status int) attribute.KeyValue {
+ return semconv.HTTPStatusCode(status)
+}
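
For concreteness, a sketch (same internal package assumed) of what ResponseTraceAttrs above yields for a typical successful response:

package semconv

import "go.opentelemetry.io/otel/attribute"

func exampleResponseAttrs() []attribute.KeyValue {
	// Produces, in append order:
	//   http.request_content_length=128
	//   http.response_content_length=512
	//   http.status_code=200
	return oldHTTPServer{}.ResponseTraceAttrs(ResponseTelemetry{
		StatusCode: 200,
		ReadBytes:  128,
		WriteBytes: 512,
	})
}
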
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
new file mode 100644
index 0000000000..7aa5f99e81
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+// Generate semconvutil package:
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
new file mode 100644
index 0000000000..a73bb06e90
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
@@ -0,0 +1,575 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/httpconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// HTTPClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status.code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...)
+func HTTPClientResponse(resp *http.Response) []attribute.KeyValue {
+ return hc.ClientResponse(resp)
+}
+
+// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func HTTPClientRequest(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequest(req)
+}
+
+// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the
+// related values are defined in req: "net.peer.port".
+func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequestMetrics(req)
+}
+
+// HTTPClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func HTTPClientStatus(code int) (codes.Code, string) {
+ return hc.ClientStatus(code)
+}
+
+// HTTPServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if
+// the related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip".
+func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequest(server, req)
+}
+
+// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are returned if they related
+// values are defined in req: "net.host.port".
+func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequestMetrics(server, req)
+}
+
+// HTTPServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func HTTPServerStatus(code int) (codes.Code, string) {
+ return hc.ServerStatus(code)
+}
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+ NetConv *netConv
+
+ HTTPClientIPKey attribute.Key
+ HTTPMethodKey attribute.Key
+ HTTPRequestContentLengthKey attribute.Key
+ HTTPResponseContentLengthKey attribute.Key
+ HTTPRouteKey attribute.Key
+ HTTPSchemeHTTP attribute.KeyValue
+ HTTPSchemeHTTPS attribute.KeyValue
+ HTTPStatusCodeKey attribute.Key
+ HTTPTargetKey attribute.Key
+ HTTPURLKey attribute.Key
+ UserAgentOriginalKey attribute.Key
+}
+
+var hc = &httpConv{
+ NetConv: nc,
+
+ HTTPClientIPKey: semconv.HTTPClientIPKey,
+ HTTPMethodKey: semconv.HTTPMethodKey,
+ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
+ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+ HTTPRouteKey: semconv.HTTPRouteKey,
+ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
+ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
+ HTTPTargetKey: semconv.HTTPTargetKey,
+ HTTPURLKey: semconv.HTTPURLKey,
+ UserAgentOriginalKey: semconv.UserAgentOriginalKey,
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status.code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+// append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.status_code int
+ http.response_content_length int
+ */
+ var n int
+ if resp.StatusCode > 0 {
+ n++
+ }
+ if resp.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+ }
+ if resp.ContentLength > 0 {
+ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+ }
+ return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ user_agent.original string
+ http.url string
+ net.peer.name string
+ net.peer.port int
+ http.request_content_length int
+ */
+
+ /* The following semantic conventions are not returned:
+ http.status_code This requires the response. See ClientResponse.
+ http.response_content_length This requires the response. See ClientResponse.
+ net.sock.family This requires the socket used.
+ net.sock.peer.addr This requires the socket used.
+ net.sock.peer.name This requires the socket used.
+ net.sock.peer.port This requires the socket used.
+ http.resend_count This is something outside of a single request.
+	   net.protocol.name The value in the Request is ignored, and the go client will always use "http".
+ net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0.
+ */
+	n := 3 // URL, peer name, and method.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ if req.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, c.HTTPURLKey.String(u))
+
+ attrs = append(attrs, c.NetConv.PeerName(peer))
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+ }
+
+ if l := req.ContentLength; l > 0 {
+ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+ }
+
+ return attrs
+}
+
+// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the related values
+// are defined in req: "net.peer.port".
+func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ net.peer.name string
+ net.peer.port int
+ */
+
+ n := 2 // method, peer name.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
+
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if they
+// related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip",
+// "net.protocol.name", "net.protocol.version".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ http.scheme string
+ net.host.name string
+ net.host.port int
+ net.sock.peer.addr string
+ net.sock.peer.port int
+ user_agent.original string
+ http.client_ip string
+ net.protocol.name string Note: not set if the value is "http".
+ net.protocol.version string
+ http.target string Note: doesn't include the query parameter.
+ */
+
+ /* The following semantic conventions are not returned:
+ http.status_code This requires the response.
+ http.request_content_length This requires the len() of body, which can mutate it.
+ http.response_content_length This requires the response.
+ http.route This is not available.
+ net.sock.peer.name This would require a DNS lookup.
+ net.sock.host.addr The request doesn't have access to the underlying socket.
+ net.sock.host.port The request doesn't have access to the underlying socket.
+
+ */
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ peer, peerPort := splitHostPort(req.RemoteAddr)
+ if peer != "" {
+ n++
+ if peerPort > 0 {
+ n++
+ }
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+
+ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP != "" {
+ n++
+ }
+
+ var target string
+ if req.URL != nil {
+ target = req.URL.Path
+ if target != "" {
+ n++
+ }
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+
+ if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+ // file-path that would be interpreted with a sock family.
+ attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+ }
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+ }
+
+ if target != "" {
+ attrs = append(attrs, c.HTTPTargetKey.String(target))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+ }
+
+ return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are returned if they related
+// values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.scheme string
+ http.route string
+ http.method string
+ http.status_code int
+ net.host.name string
+ net.host.port int
+ net.protocol.name string Note: not set if the value is "http".
+ net.protocol.version string
+ */
+
+ n := 3 // Method, scheme, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.methodMetric(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+ if protoName != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+ }
+
+ return attrs
+}
+
+func (c *httpConv) method(method string) attribute.KeyValue {
+ if method == "" {
+ return c.HTTPMethodKey.String(http.MethodGet)
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) methodMetric(method string) attribute.KeyValue {
+ method = strings.ToUpper(method)
+ switch method {
+ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+ default:
+ method = "_OTHER"
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return c.HTTPSchemeHTTPS
+ }
+ return c.HTTPSchemeHTTP
+}
+
+func serverClientIP(xForwardedFor string) string {
+ if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+ xForwardedFor = xForwardedFor[:idx]
+ }
+ return xForwardedFor
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+ if https {
+ if port > 0 && port != 443 {
+ return port
+ }
+ } else {
+ if port > 0 && port != 80 {
+ return port
+ }
+ }
+ return -1
+}
+
+// Return the request host and port from the first non-empty source.
+func firstHostPort(source ...string) (host string, port int) {
+ for _, hostport := range source {
+ host, port = splitHostPort(hostport)
+ if host != "" || port > 0 {
+ break
+ }
+ }
+ return
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 400 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
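
One subtlety in the two status mappings above: they differ on 4xx. Clients record 4xx as span errors, servers do not. A sketch, assumed to live in the same package:

package semconvutil

import "go.opentelemetry.io/otel/codes"

func statusContrastSketch() [4]codes.Code {
	c404, _ := hc.ClientStatus(404) // codes.Error: a 404 is the client's problem
	s404, _ := hc.ServerStatus(404) // codes.Unset: not a server failure
	c502, _ := hc.ClientStatus(502) // codes.Error
	s502, _ := hc.ServerStatus(502) // codes.Error
	return [4]codes.Code{c404, s404, c502, s502}
}
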
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
new file mode 100644
index 0000000000..d5c0093fc4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -0,0 +1,205 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/netconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "net"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// NetTransport returns a trace attribute describing the transport protocol of the
+// passed network. See the net.Dial for information about acceptable network
+// values.
+func NetTransport(network string) attribute.KeyValue {
+ return nc.Transport(network)
+}
+
+// netConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type netConv struct {
+ NetHostNameKey attribute.Key
+ NetHostPortKey attribute.Key
+ NetPeerNameKey attribute.Key
+ NetPeerPortKey attribute.Key
+ NetProtocolName attribute.Key
+ NetProtocolVersion attribute.Key
+ NetSockFamilyKey attribute.Key
+ NetSockPeerAddrKey attribute.Key
+ NetSockPeerPortKey attribute.Key
+ NetSockHostAddrKey attribute.Key
+ NetSockHostPortKey attribute.Key
+ NetTransportOther attribute.KeyValue
+ NetTransportTCP attribute.KeyValue
+ NetTransportUDP attribute.KeyValue
+ NetTransportInProc attribute.KeyValue
+}
+
+var nc = &netConv{
+ NetHostNameKey: semconv.NetHostNameKey,
+ NetHostPortKey: semconv.NetHostPortKey,
+ NetPeerNameKey: semconv.NetPeerNameKey,
+ NetPeerPortKey: semconv.NetPeerPortKey,
+ NetProtocolName: semconv.NetProtocolNameKey,
+ NetProtocolVersion: semconv.NetProtocolVersionKey,
+ NetSockFamilyKey: semconv.NetSockFamilyKey,
+ NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+ NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+ NetSockHostAddrKey: semconv.NetSockHostAddrKey,
+ NetSockHostPortKey: semconv.NetSockHostPortKey,
+ NetTransportOther: semconv.NetTransportOther,
+ NetTransportTCP: semconv.NetTransportTCP,
+ NetTransportUDP: semconv.NetTransportUDP,
+ NetTransportInProc: semconv.NetTransportInProc,
+}
+
+func (c *netConv) Transport(network string) attribute.KeyValue {
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ return c.NetTransportTCP
+ case "udp", "udp4", "udp6":
+ return c.NetTransportUDP
+ case "unix", "unixgram", "unixpacket":
+ return c.NetTransportInProc
+ default:
+ // "ip:*", "ip4:*", and "ip6:*" all are considered other.
+ return c.NetTransportOther
+ }
+}
+
+// Host returns attributes for a network host address.
+func (c *netConv) Host(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.HostName(h))
+ if p > 0 {
+ attrs = append(attrs, c.HostPort(int(p)))
+ }
+ return attrs
+}
+
+func (c *netConv) HostName(name string) attribute.KeyValue {
+ return c.NetHostNameKey.String(name)
+}
+
+func (c *netConv) HostPort(port int) attribute.KeyValue {
+ return c.NetHostPortKey.Int(port)
+}
+
+func family(network, address string) string {
+ switch network {
+ case "unix", "unixgram", "unixpacket":
+ return "unix"
+ default:
+ if ip := net.ParseIP(address); ip != nil {
+ if ip.To4() == nil {
+ return "inet6"
+ }
+ return "inet"
+ }
+ }
+ return ""
+}
+
+// Peer returns attributes for a network peer address.
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.PeerName(h))
+ if p > 0 {
+ attrs = append(attrs, c.PeerPort(int(p)))
+ }
+ return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+ return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+ return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+ return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+ return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p)
+}
+
+func netProtocol(proto string) (name string, version string) {
+ name, version, _ = strings.Cut(proto, "/")
+ name = strings.ToLower(name)
+ return name, version
+}
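
A sketch (same package assumed) of the netProtocol and Transport helpers above:

package semconvutil

import "go.opentelemetry.io/otel/attribute"

func netconvSketch() []attribute.KeyValue {
	name, version := netProtocol("HTTP/2.0") // -> "http", "2.0"
	return []attribute.KeyValue{
		nc.Transport("tcp4"), // -> net.transport=ip_tcp
		nc.NetProtocolName.String(name),
		nc.NetProtocolVersion.String(version),
	}
}
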
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 0000000000..1548b2db63
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,54 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+ mu sync.Mutex
+ attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.attributes = append(l.attributes, ls...)
+}
+
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ ret := make([]attribute.KeyValue, len(l.attributes))
+ copy(ret, l.attributes)
+ return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+func injectLabeler(ctx context.Context, l *Labeler) context.Context {
+	return context.WithValue(ctx, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+	l, ok := ctx.Value(labelerContextKey).(*Labeler)
+ if !ok {
+ l = &Labeler{}
+ }
+ return l, ok
+}
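
The intended use of the Labeler above is inside a handler wrapped by otelhttp; attributes added there land on the recorded metrics. A sketch in which the attribute key and header name are hypothetical:

package example

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func tenantHandler(w http.ResponseWriter, r *http.Request) {
	// The Labeler was injected by the otelhttp middleware; attributes added
	// here end up on the request/response size and latency metrics.
	labeler, _ := otelhttp.LabelerFromContext(r.Context())
	labeler.Add(attribute.String("tenant", r.Header.Get("X-Tenant")))
	w.WriteHeader(http.StatusOK)
}
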
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 0000000000..8a25e58657
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,275 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/http/httptrace"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel/metric"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span and enriches it with metrics.
+type Transport struct {
+ rt http.RoundTripper
+
+ tracer trace.Tracer
+ meter metric.Meter
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ clientTrace func(context.Context) *httptrace.ClientTrace
+
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ latencyMeasure metric.Float64Histogram
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span, injects the span context into the outbound request headers,
+// and enriches it with metrics.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+ if base == nil {
+ base = http.DefaultTransport
+ }
+
+ t := Transport{
+ rt: base,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+ WithSpanNameFormatter(defaultTransportFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ t.applyConfig(c)
+ t.createMeasures()
+
+ return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+ t.tracer = c.Tracer
+ t.meter = c.Meter
+ t.propagators = c.Propagators
+ t.spanStartOptions = c.SpanStartOptions
+ t.filters = c.Filters
+ t.spanNameFormatter = c.SpanNameFormatter
+ t.clientTrace = c.ClientTrace
+}
+
+func (t *Transport) createMeasures() {
+ var err error
+ t.requestBytesCounter, err = t.meter.Int64Counter(
+ clientRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ t.responseBytesCounter, err = t.meter.Int64Counter(
+ clientResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ t.latencyMeasure, err = t.meter.Float64Histogram(
+ clientDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of outbound HTTP requests."),
+ )
+ handleErr(err)
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+ return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+ requestStartTime := time.Now()
+ for _, f := range t.filters {
+ if !f(r) {
+ // Simply pass through to the base RoundTripper if a filter rejects the request
+ return t.rt.RoundTrip(r)
+ }
+ }
+
+ tracer := t.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+ ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+ if t.clientTrace != nil {
+ ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+ }
+
+ labeler := &Labeler{}
+ ctx = injectLabeler(ctx, labeler)
+
+	r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the original request.
+
+ // use a body wrapper to determine the request size
+ var bw bodyWrapper
+	// If the request body is nil or NoBody, don't wrap it: doing so would
+	// change the body's identity in observable ways, because callers may
+	// assert that the ReadCloser is exactly nil or NoBody.
+ if r.Body != nil && r.Body != http.NoBody {
+ bw.ReadCloser = r.Body
+		// no-op to prevent a nil panic; this record func isn't used yet.
+ bw.record = func(int64) {}
+ r.Body = &bw
+ }
+
+ span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+ t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+ res, err := t.rt.RoundTrip(r)
+ if err != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ span.End()
+ return res, err
+ }
+
+ // metrics
+ metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
+ if res.StatusCode > 0 {
+ metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
+ }
+ o := metric.WithAttributes(metricAttrs...)
+ t.requestBytesCounter.Add(ctx, bw.read.Load(), o)
+ // For handling response bytes we leverage a callback when the client reads the http response
+ readRecordFunc := func(n int64) {
+ t.responseBytesCounter.Add(ctx, n, o)
+ }
+
+ // traces
+ span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
+ span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+
+ res.Body = newWrappedBody(span, readRecordFunc, res.Body)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ t.latencyMeasure.Record(ctx, elapsedTime, o)
+
+ return res, err
+}
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser {
+	// Successful protocol switch responses will have a body that
+	// implements io.ReadWriteCloser. Ensure this interface type continues
+ // to be satisfied if that is the case.
+ if _, ok := body.(io.ReadWriteCloser); ok {
+ return &wrappedBody{span: span, record: record, body: body}
+ }
+
+ // Remove the implementation of the io.ReadWriteCloser and only implement
+ // the io.ReadCloser.
+ return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+ span trace.Span
+ recorded atomic.Bool
+ record func(n int64)
+ body io.ReadCloser
+ read atomic.Int64
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+ // This will not panic given the guard in newWrappedBody.
+ n, err := wb.body.(io.Writer).Write(p)
+ if err != nil {
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+ n, err := wb.body.Read(b)
+ // Record the number of bytes read
+ wb.read.Add(int64(n))
+
+ switch err {
+ case nil:
+ // nothing to do here but fall through to the return
+ case io.EOF:
+ wb.recordBytesRead()
+ wb.span.End()
+ default:
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+// recordBytesRead ensures the number of bytes read is recorded once and only once.
+func (wb *wrappedBody) recordBytesRead() {
+ // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that
+ // two goroutines are racing to call this method, the number of bytes read will no longer increase. Using
+ // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish
+ // calling wb.record(wb.read.Load()).
+ if wb.recorded.CompareAndSwap(false, true) {
+ // Record the total number of bytes read
+ wb.record(wb.read.Load())
+ }
+}
+
+func (wb *wrappedBody) Close() error {
+ wb.recordBytesRead()
+ wb.span.End()
+ if wb.body != nil {
+ return wb.body.Close()
+ }
+ return nil
+}
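
A client-side usage sketch: wrap the default transport, then always drain and close the response body, since the span created by RoundTrip only ends on io.EOF or Close (see wrappedBody above). The URL is hypothetical:

package main

import (
	"context"
	"io"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	client := http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	req, err := http.NewRequestWithContext(context.Background(),
		http.MethodGet, "https://example.com/", nil)
	if err != nil {
		return
	}
	res, err := client.Do(req)
	if err != nil {
		return // RoundTrip already recorded the error and ended the span
	}
	// Draining and closing the body records response bytes and ends the span.
	_, _ = io.Copy(io.Discard, res.Body)
	_ = res.Body.Close()
}
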
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 0000000000..22e485dd7d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+ return "0.52.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
new file mode 100644
index 0000000000..2f4cc124dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/propagation"
+)
+
+var _ io.ReadCloser = &bodyWrapper{}
+
+// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type bodyWrapper struct {
+ io.ReadCloser
+ record func(n int64) // must not be nil
+
+ read atomic.Int64
+ err error
+}
+
+func (w *bodyWrapper) Read(b []byte) (int, error) {
+ n, err := w.ReadCloser.Read(b)
+ n1 := int64(n)
+ w.read.Add(n1)
+ w.err = err
+ w.record(n1)
+ return n, err
+}
+
+func (w *bodyWrapper) Close() error {
+ return w.ReadCloser.Close()
+}
+
+var _ http.ResponseWriter = &respWriterWrapper{}
+
+// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
+// that may be useful when using it in real life situations.
+type respWriterWrapper struct {
+ http.ResponseWriter
+ record func(n int64) // must not be nil
+
+ // used to inject the header
+ ctx context.Context
+
+ props propagation.TextMapPropagator
+
+ written int64
+ statusCode int
+ err error
+ wroteHeader bool
+}
+
+func (w *respWriterWrapper) Header() http.Header {
+ return w.ResponseWriter.Header()
+}
+
+func (w *respWriterWrapper) Write(p []byte) (int, error) {
+ if !w.wroteHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+ n, err := w.ResponseWriter.Write(p)
+ n1 := int64(n)
+ w.record(n1)
+ w.written += n1
+ w.err = err
+ return n, err
+}
+
+// WriteHeader persists initial statusCode for span attribution.
+// All calls to WriteHeader will be propagated to the underlying ResponseWriter
+// and will persist the statusCode from the first call.
+// Suppressing consecutive calls would alter the expected behavior and would
+// silence the net/http warning logs that alert developers to incorrect handler implementations.
+func (w *respWriterWrapper) WriteHeader(statusCode int) {
+ if !w.wroteHeader {
+ w.wroteHeader = true
+ w.statusCode = statusCode
+ }
+ w.ResponseWriter.WriteHeader(statusCode)
+}
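
A minimal sketch (hypothetical wrapper, exercised with net/http/httptest) of the first-write-wins capture WriteHeader implements: every call is forwarded, but only the first status code is persisted:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type statusRecorder struct {
	http.ResponseWriter
	status      int
	wroteHeader bool
}

func (w *statusRecorder) WriteHeader(code int) {
	if !w.wroteHeader {
		w.wroteHeader = true
		w.status = code
	}
	w.ResponseWriter.WriteHeader(code) // always forward; net/http warns on duplicates
}

func main() {
	rec := httptest.NewRecorder()
	w := &statusRecorder{ResponseWriter: rec}
	w.WriteHeader(http.StatusNotFound)
	w.WriteHeader(http.StatusOK) // forwarded, but not persisted
	fmt.Println(w.status)        // 404
}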
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index cb28b36b99..e5946bfb25 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,41 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304)
+ - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+ - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
+
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
### Added
@@ -33,6 +68,11 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
@@ -2921,7 +2961,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.26.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.27.0...HEAD
+[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 31d336d922..88f4c7d0e0 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
-* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+* @MrAlias @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
-CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole
\ No newline at end of file
+CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 7847b45908..2176ce5261 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -570,6 +570,9 @@ functionality should be added, each one will need their own super-set
interfaces and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
+See also:
+[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces).
+
### Testing
The tests should never leak goroutines.
@@ -629,7 +632,6 @@ should be canceled.
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Elastic
-- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Maintainers
@@ -643,6 +645,7 @@ should be canceled.
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Josh MacDonald](https://github.com/jmacd), LightStep
+- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Become an Approver or a Maintainer
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index ca2f0ad037..a9845a88f6 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -14,8 +14,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
-precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes test-default
-ci: generate dependabot-check license-check lint vanity-import-check verify-readmes build test-default check-clean-work-tree test-coverage
+precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes test-default
+ci: generate license-check lint vanity-import-check verify-readmes build test-default check-clean-work-tree test-coverage
# Tools
@@ -39,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
SEMCONVKIT = $(TOOLS)/semconvkit
$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
-DBOTCONF = $(TOOLS)/dbotconf
-$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
-
GOLANGCI_LINT = $(TOOLS)/golangci-lint
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
@@ -70,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
-tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@@ -252,15 +249,6 @@ license-check:
exit 1; \
fi
-DEPENDABOT_CONFIG = .github/dependabot.yml
-.PHONY: dependabot-check
-dependabot-check: $(DBOTCONF)
- @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
-
-.PHONY: dependabot-generate
-dependabot-generate: $(DBOTCONF)
- @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
-
.PHONY: check-clean-work-tree
check-clean-work-tree:
@if ! git diff --quiet; then \
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 47f9a41f66..5a89093173 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s
|---------|--------------------|
| Traces | Stable |
| Metrics | Stable |
-| Logs | In development[^1] |
+| Logs | Beta[^1] |
Progress and status specific to this repository is tracked in our
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
@@ -97,12 +97,12 @@ export pipeline to send that telemetry to an observability platform.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
-| Exporter | Metrics | Traces |
-|---------------------------------------|:-------:|:------:|
-| [OTLP](./exporters/otlp/) | ✓ | ✓ |
-| [Prometheus](./exporters/prometheus/) | ✓ | |
-| [stdout](./exporters/stdout/) | ✓ | ✓ |
-| [Zipkin](./exporters/zipkin/) | | ✓ |
+| Exporter | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ |
+| [Prometheus](./exporters/prometheus/) | | ✓ | |
+| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ |
+| [Zipkin](./exporters/zipkin/) | | | ✓ |
## Contributing
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index d2691d0bd8..940f57f3d8 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
## Pre-Release
First, decide which module sets will be released and update their versions
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index b320314133..9ea0ecbbd2 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -231,15 +231,27 @@ func (v Value) Emit() string {
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
- return fmt.Sprint(v.asInt64Slice())
+ j, err := json.Marshal(v.asInt64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+ }
+ return string(j)
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
- return fmt.Sprint(v.asFloat64Slice())
+ j, err := json.Marshal(v.asFloat64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+ }
+ return string(j)
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
- return fmt.Sprint(v.asStringSlice())
+ j, err := json.Marshal(v.asStringSlice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asStringSlice())
+ }
+ return string(j)
case STRING:
return v.stringly
default:
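
A short sketch of the behavior this hunk changes (#5159): slice-valued attributes now Emit their JSON representation instead of fmt.Sprint's space-separated form, which was ambiguous for strings containing spaces:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kv := attribute.StringSlice("features", []string{"a b", "c"})
	// Before this change: [a b c] (ambiguous); after: ["a b","c"]
	fmt.Println(kv.Value.Emit())
}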
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 75773bc1ce..f98c54a3cb 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -335,9 +335,9 @@ func (m Member) String() string {
// A key is just an ASCII string. A value is restricted to be
// US-ASCII characters excluding CTLs, whitespace,
// DQUOTE, comma, semicolon, and backslash.
- s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
+ s := m.key + keyValueDelimiter + valueEscape(m.value)
if len(m.properties) > 0 {
- s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+ s += propertyDelimiter + m.properties.String()
}
return s
}
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/procfs/LICENSE
rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md
new file mode 100644
index 0000000000..9184068d89
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Metric gRPC Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
new file mode 100644
index 0000000000..428cfea233
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+type client struct {
+ metadata metadata.MD
+ exportTimeout time.Duration
+ requestFunc retry.RequestFunc
+
+ // ourConn keeps track of where conn was created: true if created here in
+ // NewClient, or false if passed with an option. This is important on
+ // Shutdown as the conn should only be closed if we created it. Otherwise,
+ // it is up to the processes that passed the conn to close it.
+ ourConn bool
+ conn *grpc.ClientConn
+ msc colmetricpb.MetricsServiceClient
+}
+
+// newClient creates a new gRPC metric client.
+func newClient(_ context.Context, cfg oconf.Config) (*client, error) {
+ c := &client{
+ exportTimeout: cfg.Metrics.Timeout,
+ requestFunc: cfg.RetryConfig.RequestFunc(retryable),
+ conn: cfg.GRPCConn,
+ }
+
+ if len(cfg.Metrics.Headers) > 0 {
+ c.metadata = metadata.New(cfg.Metrics.Headers)
+ }
+
+ if c.conn == nil {
+ // If the caller did not provide a ClientConn when the client was
+ // created, create one using the configuration they did provide.
+ userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version()
+ dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
+ dialOpts = append(dialOpts, cfg.DialOptions...)
+
+ conn, err := grpc.NewClient(cfg.Metrics.Endpoint, dialOpts...)
+ if err != nil {
+ return nil, err
+ }
+ // Keep track that we own the lifecycle of this conn and need to close
+ // it on Shutdown.
+ c.ourConn = true
+ c.conn = conn
+ }
+
+ c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
+
+ return c, nil
+}
+
+// Shutdown shuts down the client, freeing all resources.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+func (c *client) Shutdown(ctx context.Context) error {
+ // The otlpmetric.Exporter synchronizes access to client methods and
+ // ensures this is called only once. The only thing that needs to be done
+ // here is to release any computational resources the client holds.
+
+ c.metadata = nil
+ c.requestFunc = nil
+ c.msc = nil
+
+ err := ctx.Err()
+ if c.ourConn {
+ closeErr := c.conn.Close()
+ // A context timeout error takes precedence over this error.
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }
+ c.conn = nil
+ return err
+}
+
+// UploadMetrics sends protoMetrics to connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
+ // The otlpmetric.Exporter synchronizes access to client methods, and
+	// ensures this is not called after the Exporter is shut down. The only
+	// thing to do here is send data.
+
+ select {
+ case <-ctx.Done():
+ // Do not upload if the context is already expired.
+ return ctx.Err()
+ default:
+ }
+
+ ctx, cancel := c.exportContext(ctx)
+ defer cancel()
+
+ return c.requestFunc(ctx, func(iCtx context.Context) error {
+ resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
+ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
+ })
+ if resp != nil && resp.PartialSuccess != nil {
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedDataPoints()
+ if n != 0 || msg != "" {
+ err := internal.MetricPartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ // nil is converted to OK.
+ if status.Code(err) == codes.OK {
+ // Success.
+ return nil
+ }
+ return err
+ })
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function based on the client's configured export timeout.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+
+ if c.exportTimeout > 0 {
+ ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(parent)
+ }
+
+ if c.metadata.Len() > 0 {
+ ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+ }
+
+ return ctx, cancel
+}
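
A generic sketch of the deadline-or-cancel pattern exportContext applies: attach a timeout only when one is configured, otherwise just make the context cancellable:

package main

import (
	"context"
	"fmt"
	"time"
)

func withOptionalTimeout(parent context.Context, d time.Duration) (context.Context, context.CancelFunc) {
	if d > 0 {
		return context.WithTimeout(parent, d)
	}
	return context.WithCancel(parent)
}

func main() {
	ctx, cancel := withOptionalTimeout(context.Background(), 10*time.Second)
	defer cancel()
	dl, ok := ctx.Deadline()
	fmt.Println(ok, time.Until(dl) > 0) // true true
}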
+
+// retryable reports whether err identifies a request that can be retried,
+// and the duration to wait if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+ s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+ switch s.Code() {
+ case codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.Aborted,
+ codes.OutOfRange,
+ codes.Unavailable,
+ codes.DataLoss:
+ // Additionally, handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that the recovery from resource exhaustion is possible.
+ return throttleDelay(s)
+ }
+
+ // Not a retry-able error.
+ return false, 0
+}
+
+// throttleDelay reports whether the status includes RetryInfo details,
+// and the duration to wait if an explicit throttle time is included.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+ for _, detail := range s.Details() {
+ if t, ok := detail.(*errdetails.RetryInfo); ok {
+ return true, t.RetryDelay.AsDuration()
+ }
+ }
+ return false, 0
+}
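
A standalone sketch of how a server-supplied errdetails.RetryInfo maps to the throttle delay throttleDelay extracts (the status code and delay here are illustrative):

package main

import (
	"fmt"
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	s, err := status.New(codes.ResourceExhausted, "slow down").WithDetails(
		&errdetails.RetryInfo{RetryDelay: durationpb.New(3 * time.Second)},
	)
	if err != nil {
		panic(err)
	}
	for _, d := range s.Details() {
		if t, ok := d.(*errdetails.RetryInfo); ok {
			fmt.Println("retry after", t.RetryDelay.AsDuration()) // 3s
		}
	}
}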
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
new file mode 100644
index 0000000000..38d7d60d40
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
@@ -0,0 +1,264 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// Option applies a configuration option to the Exporter.
+type Option interface {
+ applyGRPCOption(oconf.Config) oconf.Config
+}
+
+func asGRPCOptions(opts []Option) []oconf.GRPCOption {
+ converted := make([]oconf.GRPCOption, len(opts))
+ for i, o := range opts {
+ converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying the export of metric data
+// that failed.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ oconf.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
+ return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the Exporter's gRPC
+// connection, just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix" client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+ return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{oconf.WithEndpointURL(u)}
+}
+
+// WithReconnectionPeriod set the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.ReconnectionPeriod = rp
+ return cfg
+ })}
+}
+
+func compressorToCompression(compressor string) oconf.Compression {
+ if compressor == "gzip" {
+ return oconf.GzipCompression
+ }
+
+ otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+ return oconf.NoCompression
+}
+
+// WithCompressor sets the compressor the gRPC client uses.
+// Supported compressor values: "gzip".
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compressor will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+ return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTLSCredentials sets the gRPC connection to use creds.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no TLS credentials will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.Metrics.GRPCCredentials = creds
+ return cfg
+ })}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.ServiceConfig = serviceConfig
+ return cfg
+ })}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when establishing a
+// gRPC connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.DialOptions = opts
+ return cfg
+ })}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The Exporter
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+ return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+ cfg.GRPCConn = conn
+ return cfg
+ })}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+ return wrappedOption{oconf.WithRetry(retry.Config(settings))}
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the client will use to
+// determine the Temporality of an instrument based on its kind. If this option
+// is not used, the client will use the DefaultTemporalitySelector from the
+// go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+ return wrappedOption{oconf.WithTemporalitySelector(selector)}
+}
+
+// WithAggregationSelector sets the AggregationSelector the client will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
+// explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+ return wrappedOption{oconf.WithAggregationSelector(selector)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
new file mode 100644
index 0000000000..3d74ef1a01
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally contain a port, a scheme, and a path.
+The value accepts the "http" and "https" schemes.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+A scheme set via OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+takes precedence over this setting.
+OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/certificate chain, matching the client's private key, to use in mTLS communication, in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+ - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+ - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+ - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+ Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+ - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+ - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
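
A hedged sketch of configuring the exporter purely through the environment variables documented above (values are illustrative; in practice they come from the deployment environment rather than os.Setenv):

package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
	// Normally set outside the process; shown here only for illustration.
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", "http://localhost:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_TIMEOUT", "5000") // milliseconds

	exp, err := otlpmetricgrpc.New(context.Background()) // no options needed
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}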
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
new file mode 100644
index 0000000000..462dc8a7a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Exporter is an OpenTelemetry metric Exporter using gRPC.
+type Exporter struct {
+ // Ensure synchronous access to the client across all functionality.
+ clientMu sync.Mutex
+ client interface {
+ UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
+ Shutdown(context.Context) error
+ }
+
+ temporalitySelector metric.TemporalitySelector
+ aggregationSelector metric.AggregationSelector
+
+ shutdownOnce sync.Once
+}
+
+func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
+ ts := cfg.Metrics.TemporalitySelector
+ if ts == nil {
+ ts = func(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+ }
+ }
+
+ as := cfg.Metrics.AggregationSelector
+ if as == nil {
+ as = metric.DefaultAggregationSelector
+ }
+
+ return &Exporter{
+ client: c,
+
+ temporalitySelector: ts,
+ aggregationSelector: as,
+ }, nil
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+ return e.temporalitySelector(k)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+ return e.aggregationSelector(k)
+}
+
+// Export transforms and transmits metric data to an OTLP receiver.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ defer global.Debug("OTLP/gRPC exporter export", "Data", rm)
+
+ otlpRm, err := transform.ResourceMetrics(rm)
+ // Best effort upload of transformable metrics.
+ e.clientMu.Lock()
+ upErr := e.client.UploadMetrics(ctx, otlpRm)
+ e.clientMu.Unlock()
+ if upErr != nil {
+ if err == nil {
+ return fmt.Errorf("failed to upload metrics: %w", upErr)
+ }
+ // Merge the two errors.
+ return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
+ }
+ return err
+}
+
+// ForceFlush flushes any metric data held by an exporter.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+ // The exporter and client hold no state, nothing to flush.
+ return ctx.Err()
+}
+
+// Shutdown flushes all metric data held by an exporter and releases any held
+// computational resources.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ e.clientMu.Lock()
+ client := e.client
+ e.client = shutdownClient{}
+ e.clientMu.Unlock()
+ err = client.Shutdown(ctx)
+ })
+ return err
+}
+
+var errShutdown = fmt.Errorf("gRPC exporter is shutdown")
+
+type shutdownClient struct{}
+
+func (c shutdownClient) err(ctx context.Context) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return errShutdown
+}
+
+func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
+ return c.err(ctx)
+}
+
+func (c shutdownClient) Shutdown(ctx context.Context) error {
+ return c.err(ctx)
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct{ Type string }{Type: "OTLP/gRPC"}
+}
+
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using gRPC.
+//
+// If an already established gRPC ClientConn is not passed in options using
+// WithGRPCConn, a connection to the OTLP endpoint will be established based
+// on options. If a connection cannot be established within the lifetime of ctx,
+// an error will be returned.
+func New(ctx context.Context, options ...Option) (*Exporter, error) {
+ cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
+ c, err := newClient(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+ return newExporter(c, cfg)
+}
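
A hedged usage sketch (endpoint and interval are illustrative) wiring the Exporter returned by New into an SDK MeterProvider via a PeriodicReader, as the package documentation recommends:

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()
	exp, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithEndpoint("localhost:4317"), // assumed local collector
		otlpmetricgrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp,
			sdkmetric.WithInterval(15*time.Second))),
	)
	defer func() { _ = mp.Shutdown(ctx) }()

	counter, _ := mp.Meter("example").Int64Counter("requests")
	counter.Add(ctx, 1)
}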
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000000..b2735ba923
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,191 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool, which is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate, which is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+			global.Error(errors.New("missing '='"), "parse headers", "input", header)
+ continue
+ }
+ name, err := url.PathUnescape(n)
+ if err != nil {
+ global.Error(err, "escape header key", "key", n)
+ continue
+ }
+ trimmedName := strings.TrimSpace(name)
+ value, err := url.PathUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
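
A standalone sketch of the parsing stringToHeader performs: comma-separated pairs, each key and value path-unescaped and trimmed (the input value is illustrative):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	input := "key1=value1, key2=hello%20world"
	headers := map[string]string{}
	for _, pair := range strings.Split(input, ",") {
		k, v, ok := strings.Cut(pair, "=")
		if !ok {
			continue // a pair without '=' is skipped with an error logged
		}
		uk, _ := url.PathUnescape(k)
		uv, _ := url.PathUnescape(v)
		headers[strings.TrimSpace(uk)] = strings.TrimSpace(uv)
	}
	fmt.Println(headers) // map[key1:value1 key2:hello world]
}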
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
new file mode 100644
index 0000000000..95e2f4ba3b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
new file mode 100644
index 0000000000..7ae53f2d18
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
@@ -0,0 +1,210 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// DefaultEnvOptionsReader is the default environment options reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
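+
+// For example (illustrative; the exact prefixing is implemented by the
+// envconfig package): with the "OTEL_EXPORTER_OTLP" namespace above, the
+// reader resolves the key "METRICS_ENDPOINT" from the
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT environment variable.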
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and signals are sent to their default paths relative to it.
+ cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if a URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Metrics.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
+ withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
+ )
+
+ return opts
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
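+
+// For example (illustrative): for the URL
+// "https://collector.example.com:4317/ns", the gRPC target becomes
+// "collector.example.com:4317/ns".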
+
+// WithEnvCompression retrieves the specified environment variable value and passes it to fn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
+
+func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if s, ok := e.GetEnvValue(n); ok {
+ switch strings.ToLower(s) {
+ case "cumulative":
+ fn(cumulativeTemporality)
+ case "delta":
+ fn(deltaTemporality)
+ case "lowmemory":
+ fn(lowMemory)
+ default:
+ global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
+ }
+ }
+ }
+}
+
+func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+}
+
+func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
+ switch ik {
+ case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
+ switch ik {
+ case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if s, ok := e.GetEnvValue(n); ok {
+ switch strings.ToLower(s) {
+ case "explicit_bucket_histogram":
+ fn(metric.DefaultAggregationSelector)
+ case "base2_exponential_bucket_histogram":
+ fn(func(kind metric.InstrumentKind) metric.Aggregation {
+ if kind == metric.InstrumentKindHistogram {
+ return metric.AggregationBase2ExponentialHistogram{
+ MaxSize: 160,
+ MaxScale: 20,
+ NoMinMax: false,
+ }
+ }
+ return metric.DefaultAggregationSelector(kind)
+ })
+ default:
+ global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
+ }
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
new file mode 100644
index 0000000000..b6ed9a2bb6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
@@ -0,0 +1,376 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+const (
+ // DefaultMaxAttempts describes how many times the driver
+ // should retry the sending of the payload in case of a
+ // retryable error.
+ DefaultMaxAttempts int = 5
+ // DefaultMetricsPath is the default URL path for the endpoint that
+ // receives metrics.
+ DefaultMetricsPath string = "/v1/metrics"
+ // DefaultBackoff is a default base backoff time used in the
+ // exponential backoff strategy.
+ DefaultBackoff time.Duration = 300 * time.Millisecond
+ // DefaultTimeout is the default maximum time to wait for the backend to
+ // process each span or metrics batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+ // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
+ HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+ SignalConfig struct {
+ Endpoint string
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ TemporalitySelector metric.TemporalitySelector
+ AggregationSelector metric.AggregationSelector
+
+ Proxy HTTPTransportProxyFunc
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Metrics SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Metrics: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultMetricsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+
+ TemporalitySelector: metric.DefaultTemporalitySelector,
+ AggregationSelector: metric.DefaultAggregationSelector,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
+ return cfg
+}
+
+// cleanPath returns a path with surrounding whitespace trimmed and redundant
+// elements removed. If urlPath is empty or cleans to ".", defaultPath is
+// returned instead.
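+//
+// For example (illustrative): cleanPath(" /v1//metrics ", "/v1/metrics") and
+// cleanPath("", "/v1/metrics") both return "/v1/metrics", and
+// cleanPath("v1/metrics", "/v1/metrics") returns "/v1/metrics" after being
+// made absolute.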
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = fmt.Sprintf("/%s", tmp)
+ }
+ return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
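+//
+// Environment configuration is applied before the passed opts, so explicitly
+// passed options take precedence over values read from the environment.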
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ cfg := Config{
+ Metrics: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultMetricsPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+
+ TemporalitySelector: metric.DefaultTemporalitySelector,
+ AggregationSelector: metric.DefaultAggregationSelector,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+ // Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Metrics.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
+ } else if cfg.Metrics.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Metrics.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Metrics.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so future additions to it will not violate
+ // compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so future additions to it will not violate
+ // compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so future additions to it will not violate
+ // compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Endpoint = endpoint
+ return cfg
+ })
+}
+
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlpmetric: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Metrics.Endpoint = u.Host
+ cfg.Metrics.URLPath = u.Path
+ if u.Scheme != "https" {
+ cfg.Metrics.Insecure = true
+ }
+
+ return cfg
+ })
+}
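+
+// For example (illustrative): WithEndpointURL("http://localhost:4318/v1/metrics")
+// sets the endpoint to "localhost:4318" and the URL path to "/v1/metrics",
+// and marks the connection insecure because the scheme is not "https".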
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Metrics.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Timeout = duration
+ return cfg
+ })
+}
+
+func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.TemporalitySelector = selector
+ return cfg
+ })
+}
+
+func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.AggregationSelector = selector
+ return cfg
+ })
+}
+
+func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.Proxy = pf
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
new file mode 100644
index 0000000000..83f6d7fd1a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
@@ -0,0 +1,47 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import "time"
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // to connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// RetrySettings defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type RetrySettings struct {
+ // Enabled indicates whether to retry sending batches in case of export failure.
+ Enabled bool
+ // InitialInterval is the time to wait after the first failure before retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+ // consecutive retries will always be `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+ // Once this value is reached, the data is discarded.
+ MaxElapsedTime time.Duration
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
new file mode 100644
index 0000000000..0229ac80be
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
@@ -0,0 +1,38 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return CreateTLSConfig(b)
+}
+
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
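+
+// A minimal usage sketch (the CA bundle path below is hypothetical):
+//
+//	tlsCfg, err := ReadTLSConfigFromFile("/etc/otel/ca.pem")
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = WithTLSClientConfig(tlsCfg)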
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
new file mode 100644
index 0000000000..50e25fdbc7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+ _, ok := err.(PartialSuccess)
+ return ok
+}
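+
+// For example (sketch): a caller can detect this category with
+//
+//	if errors.Is(err, PartialSuccess{}) {
+//		// err, or an error it wraps, is an OTLP partial success message.
+//	}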
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "metric data points",
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
new file mode 100644
index 0000000000..b552333dbb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether to retry sending batches in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval is the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retryable and whether an explicit
+// throttle duration included in the error should be honored.
+//
+// The function must return true if the error argument is retryable,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains an
+// explicit throttle duration that should be honored, otherwise it must
+// return a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
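+//
+// A minimal usage sketch (send is a hypothetical upload function; this
+// evaluate function retries every error and reports no throttle hint):
+//
+//	req := c.RequestFunc(func(err error) (bool, time.Duration) {
+//		return true, 0
+//	})
+//	err := req(ctx, func(ctx context.Context) error { return send(ctx) })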
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %s", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
new file mode 100644
index 0000000000..2605c74d05
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
@@ -0,0 +1,144 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// AttrIter transforms an attribute iterator into OTLP key-values.
+func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*cpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
+ return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *cpb.AnyValue {
+ av := new(cpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &cpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &cpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &cpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &cpb.AnyValue_ArrayValue{
+ ArrayValue: &cpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &cpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
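+
+// For example (sketch): Value(attribute.BoolValue(true)) returns an AnyValue
+// whose Value field is an *AnyValue_BoolValue wrapping true.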
+
+func boolSliceValues(vals []bool) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*cpb.AnyValue {
+ converted := make([]*cpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &cpb.AnyValue{
+ Value: &cpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
new file mode 100644
index 0000000000..fb009ba21c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
@@ -0,0 +1,103 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+var (
+ errUnknownAggregation = errors.New("unknown aggregation")
+ errUnknownTemporality = errors.New("unknown temporality")
+)
+
+type errMetric struct {
+ m *mpb.Metric
+ err error
+}
+
+func (e errMetric) Unwrap() error {
+ return e.err
+}
+
+func (e errMetric) Error() string {
+ format := "invalid metric (name: %q, description: %q, unit: %q): %s"
+ return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
+}
+
+func (e errMetric) Is(target error) bool {
+ return errors.Is(e.err, target)
+}
+
+// multiErr is used by the data-type transform functions to wrap multiple
+// errors into a single return value. The error message will show all errors
+// as a list, scoped by the name of the datatype that produced them.
+type multiErr struct {
+ datatype string
+ errs []error
+}
+
+// errOrNil returns nil if e contains no errors, otherwise it returns e.
+func (e *multiErr) errOrNil() error {
+ if len(e.errs) == 0 {
+ return nil
+ }
+ return e
+}
+
+// append adds err to e. If err is a multiErr, its errs are flattened into e.
+func (e *multiErr) append(err error) {
+ // Do not use errors.As here; this should only be flattened one layer. If
+ // there is a *multiErr several steps down the chain, all the errors above
+ // it will be discarded if errors.As is used instead.
+ switch other := err.(type) {
+ case *multiErr:
+ // Flatten err errors into e.
+ e.errs = append(e.errs, other.errs...)
+ default:
+ e.errs = append(e.errs, err)
+ }
+}
+
+func (e *multiErr) Error() string {
+ es := make([]string, len(e.errs))
+ for i, err := range e.errs {
+ es[i] = fmt.Sprintf("* %s", err)
+ }
+
+ format := "%d errors occurred transforming %s:\n\t%s"
+ return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
+}
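+
+// For example (sketch): a multiErr holding two errors for datatype "Metrics"
+// renders as:
+//
+//	2 errors occurred transforming Metrics:
+//		* first error
+//		* second error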
+
+func (e *multiErr) Unwrap() error {
+ switch len(e.errs) {
+ case 0:
+ return nil
+ case 1:
+ return e.errs[0]
+ }
+
+ // Return a multiErr without the leading error.
+ cp := &multiErr{
+ datatype: e.datatype,
+ errs: make([]error, len(e.errs)-1),
+ }
+ copy(cp.errs, e.errs[1:])
+ return cp
+}
+
+func (e *multiErr) Is(target error) bool {
+ if len(e.errs) == 0 {
+ return false
+ }
+ // Check if the first error is target.
+ return errors.Is(e.errs[0], target)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
new file mode 100644
index 0000000000..669e25e8e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
@@ -0,0 +1,352 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package transform provides transformation functionality from the
+// sdk/metric/metricdata data-types into OTLP data-types.
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
+
+import (
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ cpb "go.opentelemetry.io/proto/otlp/common/v1"
+ mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+ rpb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
+// contains invalid ScopeMetrics, an error will be returned along with an OTLP
+// ResourceMetrics that contains partial OTLP ScopeMetrics.
+func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
+ sms, err := ScopeMetrics(rm.ScopeMetrics)
+ return &mpb.ResourceMetrics{
+ Resource: &rpb.Resource{
+ Attributes: AttrIter(rm.Resource.Iter()),
+ },
+ ScopeMetrics: sms,
+ SchemaUrl: rm.Resource.SchemaURL(),
+ }, err
+}
+
+// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
+// sms contains invalid metric values, an error will be returned along with a
+// slice that contains partial OTLP ScopeMetrics.
+func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
+ errs := &multiErr{datatype: "ScopeMetrics"}
+ out := make([]*mpb.ScopeMetrics, 0, len(sms))
+ for _, sm := range sms {
+ ms, err := Metrics(sm.Metrics)
+ if err != nil {
+ errs.append(err)
+ }
+
+ out = append(out, &mpb.ScopeMetrics{
+ Scope: &cpb.InstrumentationScope{
+ Name: sm.Scope.Name,
+ Version: sm.Scope.Version,
+ },
+ Metrics: ms,
+ SchemaUrl: sm.Scope.SchemaURL,
+ })
+ }
+ return out, errs.errOrNil()
+}
+
+// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
+// invalid metric values, an error will be returned along with a slice that
+// contains partial OTLP Metrics.
+func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
+ errs := &multiErr{datatype: "Metrics"}
+ out := make([]*mpb.Metric, 0, len(ms))
+ for _, m := range ms {
+ o, err := metric(m)
+ if err != nil {
+ // Do not include invalid data. Drop the metric, report the error.
+ errs.append(errMetric{m: o, err: err})
+ continue
+ }
+ out = append(out, o)
+ }
+ return out, errs.errOrNil()
+}
+
+func metric(m metricdata.Metrics) (*mpb.Metric, error) {
+ var err error
+ out := &mpb.Metric{
+ Name: m.Name,
+ Description: m.Description,
+ Unit: string(m.Unit),
+ }
+ switch a := m.Data.(type) {
+ case metricdata.Gauge[int64]:
+ out.Data = Gauge[int64](a)
+ case metricdata.Gauge[float64]:
+ out.Data = Gauge[float64](a)
+ case metricdata.Sum[int64]:
+ out.Data, err = Sum[int64](a)
+ case metricdata.Sum[float64]:
+ out.Data, err = Sum[float64](a)
+ case metricdata.Histogram[int64]:
+ out.Data, err = Histogram(a)
+ case metricdata.Histogram[float64]:
+ out.Data, err = Histogram(a)
+ case metricdata.ExponentialHistogram[int64]:
+ out.Data, err = ExponentialHistogram(a)
+ case metricdata.ExponentialHistogram[float64]:
+ out.Data, err = ExponentialHistogram(a)
+ case metricdata.Summary:
+ out.Data = Summary(a)
+ default:
+ return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
+ }
+ return out, err
+}
+
+// Gauge returns an OTLP Metric_Gauge generated from g.
+func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
+ return &mpb.Metric_Gauge{
+ Gauge: &mpb.Gauge{
+ DataPoints: DataPoints(g.DataPoints),
+ },
+ }
+}
+
+// Sum returns an OTLP Metric_Sum generated from s. An error is returned
+// if the temporality of s is unknown.
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
+ t, err := Temporality(s.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Sum{
+ Sum: &mpb.Sum{
+ AggregationTemporality: t,
+ IsMonotonic: s.IsMonotonic,
+ DataPoints: DataPoints(s.DataPoints),
+ },
+ }, nil
+}
+
+// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
+func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
+ out := make([]*mpb.NumberDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ ndp := &mpb.NumberDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ switch v := any(dPt.Value).(type) {
+ case int64:
+ ndp.Value = &mpb.NumberDataPoint_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ ndp.Value = &mpb.NumberDataPoint_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, ndp)
+ }
+ return out
+}
+
+// Histogram returns an OTLP Metric_Histogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_Histogram{
+ Histogram: &mpb.Histogram{
+ AggregationTemporality: t,
+ DataPoints: HistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
+// from dPts.
+func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
+ out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ hdp := &mpb.HistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ BucketCounts: dPt.BucketCounts,
+ ExplicitBounds: dPt.Bounds,
+ Exemplars: Exemplars(dPt.Exemplars),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ hdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ hdp.Max = &vF64
+ }
+ out = append(out, hdp)
+ }
+ return out
+}
+
+// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
+// returned if the temporality of h is unknown.
+func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+ t, err := Temporality(h.Temporality)
+ if err != nil {
+ return nil, err
+ }
+ return &mpb.Metric_ExponentialHistogram{
+ ExponentialHistogram: &mpb.ExponentialHistogram{
+ AggregationTemporality: t,
+ DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
+ },
+ }, nil
+}
+
+// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
+// from dPts.
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+ out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sum := float64(dPt.Sum)
+ ehdp := &mpb.ExponentialHistogramDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: &sum,
+ Scale: dPt.Scale,
+ ZeroCount: dPt.ZeroCount,
+ Exemplars: Exemplars(dPt.Exemplars),
+
+ Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
+ Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
+ }
+ if v, ok := dPt.Min.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Min = &vF64
+ }
+ if v, ok := dPt.Max.Value(); ok {
+ vF64 := float64(v)
+ ehdp.Max = &vF64
+ }
+ out = append(out, ehdp)
+ }
+ return out
+}
+
+// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
+// from bucket.
+func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+ return &mpb.ExponentialHistogramDataPoint_Buckets{
+ Offset: bucket.Offset,
+ BucketCounts: bucket.Counts,
+ }
+}
+
+// Temporality returns an OTLP AggregationTemporality generated from t. If t
+// is unknown, an error is returned along with the invalid
+// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
+func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
+ switch t {
+ case metricdata.DeltaTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
+ case metricdata.CumulativeTemporality:
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
+ default:
+ err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
+ return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
+ }
+}
+
+// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC as uint64.
+// The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64
+// (a date before the year 1678 or after 2262).
+// timeUnixNano on the zero Time returns 0.
+// The result does not depend on the location associated with t.
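+//
+// For example, timeUnixNano(time.Unix(1, 500)) returns 1000000500, and
+// timeUnixNano(time.Time{}) returns 0.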
+func timeUnixNano(t time.Time) uint64 {
+ if t.IsZero() {
+ return 0
+ }
+ return uint64(t.UnixNano())
+}
+
+// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
+func Exemplars[N int64 | float64](exemplars []metricdata.Exemplar[N]) []*mpb.Exemplar {
+ out := make([]*mpb.Exemplar, 0, len(exemplars))
+ for _, exemplar := range exemplars {
+ e := &mpb.Exemplar{
+ FilteredAttributes: KeyValues(exemplar.FilteredAttributes),
+ TimeUnixNano: timeUnixNano(exemplar.Time),
+ SpanId: exemplar.SpanID,
+ TraceId: exemplar.TraceID,
+ }
+ switch v := any(exemplar.Value).(type) {
+ case int64:
+ e.Value = &mpb.Exemplar_AsInt{
+ AsInt: v,
+ }
+ case float64:
+ e.Value = &mpb.Exemplar_AsDouble{
+ AsDouble: v,
+ }
+ }
+ out = append(out, e)
+ }
+ return out
+}
+
+// Summary returns an OTLP Metric_Summary generated from s.
+func Summary(s metricdata.Summary) *mpb.Metric_Summary {
+ return &mpb.Metric_Summary{
+ Summary: &mpb.Summary{
+ DataPoints: SummaryDataPoints(s.DataPoints),
+ },
+ }
+}
+
+// SummaryDataPoints returns a slice of OTLP SummaryDataPoint generated from
+// dPts.
+func SummaryDataPoints(dPts []metricdata.SummaryDataPoint) []*mpb.SummaryDataPoint {
+ out := make([]*mpb.SummaryDataPoint, 0, len(dPts))
+ for _, dPt := range dPts {
+ sdp := &mpb.SummaryDataPoint{
+ Attributes: AttrIter(dPt.Attributes.Iter()),
+ StartTimeUnixNano: timeUnixNano(dPt.StartTime),
+ TimeUnixNano: timeUnixNano(dPt.Time),
+ Count: dPt.Count,
+ Sum: dPt.Sum,
+ QuantileValues: QuantileValues(dPt.QuantileValues),
+ }
+ out = append(out, sdp)
+ }
+ return out
+}
+
+// QuantileValues returns a slice of OTLP SummaryDataPoint_ValueAtQuantile
+// generated from quantiles.
+func QuantileValues(quantiles []metricdata.QuantileValue) []*mpb.SummaryDataPoint_ValueAtQuantile {
+ out := make([]*mpb.SummaryDataPoint_ValueAtQuantile, 0, len(quantiles))
+ for _, q := range quantiles {
+ quantile := &mpb.SummaryDataPoint_ValueAtQuantile{
+ Quantile: q.Quantile,
+ Value: q.Value,
+ }
+ out = append(out, quantile)
+ }
+ return out
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
new file mode 100644
index 0000000000..99a5e9f04c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
+func Version() string {
+ return "1.27.0"
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
similarity index 100%
rename from vendor/github.com/uber/jaeger-client-go/LICENSE
rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
new file mode 100644
index 0000000000..50802d5aee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
new file mode 100644
index 0000000000..3c1a625c06
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+ "context"
+
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Client manages connections to the collector, handles the
+// transformation of data into the wire format, and handles the
+// transmission of that data to the collector.
+type Client interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Start should establish connection(s) to endpoint(s). It is
+ // called just once by the exporter, so the implementation
+ // does not need to worry about idempotence and locking.
+ Start(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Stop should close the connections. The function is called
+ // only once by the exporter, so the implementation does not
+ // need to worry about idempotence, but it may be called
+ // concurrently with UploadTraces, so proper
+ // locking is required. The function serves as a
+ // synchronization point - after the function returns, the
+ // process of closing connections is assumed to be finished.
+ Stop(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // UploadTraces should transform the passed traces to the wire
+ // format and send them to the collector. May be called
+ // concurrently.
+ UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
new file mode 100644
index 0000000000..09ad5eadb6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptrace contains abstractions for OTLP span exporters.
+See the official OTLP span exporter implementations:
+ - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc],
+ - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp].
+*/
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
new file mode 100644
index 0000000000..3f0a518ae0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -0,0 +1,105 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+)
+
+var errAlreadyStarted = errors.New("already started")
+
+// Exporter exports trace data in the OTLP wire format.
+type Exporter struct {
+ client Client
+
+ mu sync.RWMutex
+ started bool
+
+ startOnce sync.Once
+ stopOnce sync.Once
+}
+
+// ExportSpans exports a batch of spans.
+func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
+ protoSpans := tracetransform.Spans(ss)
+ if len(protoSpans) == 0 {
+ return nil
+ }
+
+ err := e.client.UploadTraces(ctx, protoSpans)
+ if err != nil {
+ return fmt.Errorf("traces export: %w", err)
+ }
+ return nil
+}
+
+// Start establishes a connection to the receiving endpoint.
+func (e *Exporter) Start(ctx context.Context) error {
+ err := errAlreadyStarted
+ e.startOnce.Do(func() {
+ e.mu.Lock()
+ e.started = true
+ e.mu.Unlock()
+ err = e.client.Start(ctx)
+ })
+
+ return err
+}
+
+// Shutdown flushes all exports and closes all connections to the receiving endpoint.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ e.mu.RLock()
+ started := e.started
+ e.mu.RUnlock()
+
+ if !started {
+ return nil
+ }
+
+ var err error
+
+ e.stopOnce.Do(func() {
+ err = e.client.Stop(ctx)
+ e.mu.Lock()
+ e.started = false
+ e.mu.Unlock()
+ })
+
+ return err
+}
+
+var _ tracesdk.SpanExporter = (*Exporter)(nil)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, client Client) (*Exporter, error) {
+ exp := NewUnstarted(client)
+ if err := exp.Start(ctx); err != nil {
+ return nil, err
+ }
+ return exp, nil
+}
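+
+// A minimal usage sketch (using the otlptracegrpc client package listed in
+// this package's documentation):
+//
+//	client := otlptracegrpc.NewClient()
+//	exp, err := otlptrace.New(ctx, client)
+//	if err != nil {
+//		// handle the error
+//	}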
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(client Client) *Exporter {
+ return &Exporter{
+ client: client,
+ }
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Client Client
+ }{
+ Type: "otlptrace",
+ Client: e.client,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
new file mode 100644
index 0000000000..4571a5ca39
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
@@ -0,0 +1,147 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*commonpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// Iterator transforms an attribute iterator into OTLP key-values.
+func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*commonpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+// ResourceAttributes transforms a Resource into OTLP key-values.
+func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
+ return Iterator(res.Iter())
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
+ return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *commonpb.AnyValue {
+ av := new(commonpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &commonpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &commonpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &commonpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &commonpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &commonpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
new file mode 100644
index 0000000000..f6dd3decc9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
+ if il == (instrumentation.Scope{}) {
+ return nil
+ }
+ return &commonpb.InstrumentationScope{
+ Name: il.Name,
+ Version: il.Version,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
new file mode 100644
index 0000000000..db7b698a56
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/sdk/resource"
+ resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// Resource transforms a Resource into an OTLP Resource.
+func Resource(r *resource.Resource) *resourcepb.Resource {
+ if r == nil {
+ return nil
+ }
+ return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
new file mode 100644
index 0000000000..c3c69c5a0d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -0,0 +1,207 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/trace"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
+// ResourceSpans.
+func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
+ if len(sdl) == 0 {
+ return nil
+ }
+
+ rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
+
+ type key struct {
+ r attribute.Distinct
+ is instrumentation.Scope
+ }
+ ssm := make(map[key]*tracepb.ScopeSpans)
+
+ var resources int
+ for _, sd := range sdl {
+ if sd == nil {
+ continue
+ }
+
+ rKey := sd.Resource().Equivalent()
+ k := key{
+ r: rKey,
+ is: sd.InstrumentationScope(),
+ }
+ scopeSpan, iOk := ssm[k]
+ if !iOk {
+ // Either the resource or instrumentation scope was unknown.
+ scopeSpan = &tracepb.ScopeSpans{
+ Scope: InstrumentationScope(sd.InstrumentationScope()),
+ Spans: []*tracepb.Span{},
+ SchemaUrl: sd.InstrumentationScope().SchemaURL,
+ }
+ }
+ scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
+ ssm[k] = scopeSpan
+
+ rs, rOk := rsm[rKey]
+ if !rOk {
+ resources++
+ // The resource was unknown.
+ rs = &tracepb.ResourceSpans{
+ Resource: Resource(sd.Resource()),
+ ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
+ SchemaUrl: sd.Resource().SchemaURL(),
+ }
+ rsm[rKey] = rs
+ continue
+ }
+
+ // The resource has been seen before. Check if the instrumentation
+ // scope lookup was unknown because if so we need to add it to the
+ // ResourceSpans. Otherwise, the instrumentation scope has already
+ // been seen and the span appended above is already included in the
+ // ScopeSpans reference.
+ if !iOk {
+ rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
+ }
+ }
+
+ // Transform the categorized map into a slice
+ rss := make([]*tracepb.ResourceSpans, 0, resources)
+ for _, rs := range rsm {
+ rss = append(rss, rs)
+ }
+ return rss
+}
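+
+// Illustrative note (not upstream documentation): spans sharing the same
+// Resource and instrumentation scope are appended to a single ScopeSpans,
+// and all ScopeSpans for one Resource are grouped under a single
+// ResourceSpans, so the export payload never repeats resource or scope data.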
+
+// span transforms a Span into an OTLP span.
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
+ if sd == nil {
+ return nil
+ }
+
+ tid := sd.SpanContext().TraceID()
+ sid := sd.SpanContext().SpanID()
+
+ s := &tracepb.Span{
+ TraceId: tid[:],
+ SpanId: sid[:],
+ TraceState: sd.SpanContext().TraceState().String(),
+ Status: status(sd.Status().Code, sd.Status().Description),
+ StartTimeUnixNano: uint64(sd.StartTime().UnixNano()),
+ EndTimeUnixNano: uint64(sd.EndTime().UnixNano()),
+ Links: links(sd.Links()),
+ Kind: spanKind(sd.SpanKind()),
+ Name: sd.Name(),
+ Attributes: KeyValues(sd.Attributes()),
+ Events: spanEvents(sd.Events()),
+ DroppedAttributesCount: uint32(sd.DroppedAttributes()),
+ DroppedEventsCount: uint32(sd.DroppedEvents()),
+ DroppedLinksCount: uint32(sd.DroppedLinks()),
+ }
+
+ if psid := sd.Parent().SpanID(); psid.IsValid() {
+ s.ParentSpanId = psid[:]
+ }
+ s.Flags = buildSpanFlags(sd.Parent())
+
+ return s
+}
+
+// status transforms a span code and message into an OTLP span status.
+func status(status codes.Code, message string) *tracepb.Status {
+ var c tracepb.Status_StatusCode
+ switch status {
+ case codes.Ok:
+ c = tracepb.Status_STATUS_CODE_OK
+ case codes.Error:
+ c = tracepb.Status_STATUS_CODE_ERROR
+ default:
+ c = tracepb.Status_STATUS_CODE_UNSET
+ }
+ return &tracepb.Status{
+ Code: c,
+ Message: message,
+ }
+}
+
+// links transforms span Links to OTLP span links.
+func links(links []tracesdk.Link) []*tracepb.Span_Link {
+ if len(links) == 0 {
+ return nil
+ }
+
+ sl := make([]*tracepb.Span_Link, 0, len(links))
+ for _, otLink := range links {
+ // This redefinition is necessary to prevent otLink.*ID[:] copies
+ // being reused -- in short we need a new otLink per iteration.
+ otLink := otLink
+
+ tid := otLink.SpanContext.TraceID()
+ sid := otLink.SpanContext.SpanID()
+
+ flags := buildSpanFlags(otLink.SpanContext)
+
+ sl = append(sl, &tracepb.Span_Link{
+ TraceId: tid[:],
+ SpanId: sid[:],
+ Attributes: KeyValues(otLink.Attributes),
+ DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
+ Flags: flags,
+ })
+ }
+ return sl
+}
+
+func buildSpanFlags(sc trace.SpanContext) uint32 {
+ flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
+ if sc.IsRemote() {
+ flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
+ }
+
+ return uint32(flags)
+}
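+
+// Illustrative note (mask values taken from the OTLP protocol definition):
+// the returned flags always have CONTEXT_HAS_IS_REMOTE (0x100) set, and
+// additionally CONTEXT_IS_REMOTE (0x200) when the span context is remote,
+// yielding 0x300 for a remote parent.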
+
+// spanEvents transforms span Events to OTLP span events.
+func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
+ if len(es) == 0 {
+ return nil
+ }
+
+ events := make([]*tracepb.Span_Event, len(es))
+ // Transform message events
+ for i := 0; i < len(es); i++ {
+ events[i] = &tracepb.Span_Event{
+ Name: es[i].Name,
+ TimeUnixNano: uint64(es[i].Time.UnixNano()),
+ Attributes: KeyValues(es[i].Attributes),
+ DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
+ }
+ }
+ return events
+}
+
+// spanKind transforms a SpanKind to an OTLP span kind.
+func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
+ switch kind {
+ case trace.SpanKindInternal:
+ return tracepb.Span_SPAN_KIND_INTERNAL
+ case trace.SpanKindClient:
+ return tracepb.Span_SPAN_KIND_CLIENT
+ case trace.SpanKindServer:
+ return tracepb.Span_SPAN_KIND_SERVER
+ case trace.SpanKindProducer:
+ return tracepb.Span_SPAN_KIND_PRODUCER
+ case trace.SpanKindConsumer:
+ return tracepb.Span_SPAN_KIND_CONSUMER
+ default:
+ return tracepb.Span_SPAN_KIND_UNSPECIFIED
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
similarity index 100%
rename from vendor/github.com/uber/jaeger-lib/LICENSE
rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
new file mode 100644
index 0000000000..5309bb7cb1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace gRPC Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
new file mode 100644
index 0000000000..3993df927d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -0,0 +1,295 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+type client struct {
+ endpoint string
+ dialOpts []grpc.DialOption
+ metadata metadata.MD
+ exportTimeout time.Duration
+ requestFunc retry.RequestFunc
+
+ // stopCtx is used as a parent context for all exports. Therefore, when it
+ // is canceled with the stopFunc all exports are canceled.
+ stopCtx context.Context
+ // stopFunc cancels stopCtx, stopping any active exports.
+ stopFunc context.CancelFunc
+
+ // ourConn keeps track of where conn was created: true if created here on
+ // Start, or false if passed with an option. This is important on Shutdown
+ // as the conn should only be closed if created here on start. Otherwise,
+ // it is up to the processes that passed the conn to close it.
+ ourConn bool
+ conn *grpc.ClientConn
+ tscMu sync.RWMutex
+ tsc coltracepb.TraceServiceClient
+}
+
+// Compile-time check that *client implements otlptrace.Client.
+var _ otlptrace.Client = (*client)(nil)
+
+// NewClient creates a new gRPC trace client.
+func NewClient(opts ...Option) otlptrace.Client {
+ return newClient(opts...)
+}
+
+func newClient(opts ...Option) *client {
+ cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ c := &client{
+ endpoint: cfg.Traces.Endpoint,
+ exportTimeout: cfg.Traces.Timeout,
+ requestFunc: cfg.RetryConfig.RequestFunc(retryable),
+ dialOpts: cfg.DialOptions,
+ stopCtx: ctx,
+ stopFunc: cancel,
+ conn: cfg.GRPCConn,
+ }
+
+ if len(cfg.Traces.Headers) > 0 {
+ c.metadata = metadata.New(cfg.Traces.Headers)
+ }
+
+ return c
+}
+
+// Start establishes a gRPC connection to the collector.
+func (c *client) Start(context.Context) error {
+ if c.conn == nil {
+ // If the caller did not provide a ClientConn when the client was
+ // created, create one using the configuration they did provide.
+ conn, err := grpc.NewClient(c.endpoint, c.dialOpts...)
+ if err != nil {
+ return err
+ }
+ // Keep track that we own the lifecycle of this conn and need to close
+ // it on Shutdown.
+ c.ourConn = true
+ c.conn = conn
+ }
+
+ // The otlptrace.Client interface states this method is called just once,
+ // so no need to check if already started.
+ c.tscMu.Lock()
+ c.tsc = coltracepb.NewTraceServiceClient(c.conn)
+ c.tscMu.Unlock()
+
+ return nil
+}
+
+var errAlreadyStopped = errors.New("the client is already stopped")
+
+// Stop shuts down the client.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// This method synchronizes with the UploadTraces method of the client. It
+// will wait for any active calls to that method to complete unimpeded, or it
+// will cancel any active calls if ctx expires. If ctx expires, the context
+// error will be forwarded as the returned error. All client held resources
+// will still be released in this situation.
+//
+// If the client has already stopped, an error will be returned describing
+// this.
+func (c *client) Stop(ctx context.Context) error {
+ // Make sure to return context error if the context is done when calling this method.
+ err := ctx.Err()
+
+ // Acquire the c.tscMu lock within the ctx lifetime.
+ acquired := make(chan struct{})
+ go func() {
+ c.tscMu.Lock()
+ close(acquired)
+ }()
+
+ select {
+ case <-ctx.Done():
+ // The Stop timeout is reached. Kill any remaining exports to force
+ // the clear of the lock and save the timeout error to return and
+ // signal the shutdown timed out before cleanly stopping.
+ c.stopFunc()
+ err = ctx.Err()
+
+ // To ensure the client is not left in a dirty state c.tsc needs to be
+ // set to nil. To avoid the race condition when doing this, ensure
+ // that all the exports are killed (initiated by c.stopFunc).
+ <-acquired
+ case <-acquired:
+ }
+ // Hold the tscMu lock for the rest of the function to ensure no new
+ // exports are started.
+ defer c.tscMu.Unlock()
+
+ // The otlptrace.Client interface states this method is called only
+ // once, but there is no guarantee it is called after Start. Ensure the
+ // client is started before doing anything and let the caller know if
+ // they made a mistake.
+ if c.tsc == nil {
+ return errAlreadyStopped
+ }
+
+ // Clear c.tsc to signal the client is stopped.
+ c.tsc = nil
+
+ if c.ourConn {
+ closeErr := c.conn.Close()
+ // A context timeout error takes precedence over this error.
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }
+ return err
+}
+
+var errShutdown = errors.New("the client is shutdown")
+
+// UploadTraces sends a batch of spans.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+ // Hold a read lock to ensure a shutdown initiated after this starts
+ // does not abandon the export. This read lock acquire has lower
+ // priority than a write lock acquire (i.e. Stop), meaning if the
+ // client is shutting down this will come after the shutdown.
+ c.tscMu.RLock()
+ defer c.tscMu.RUnlock()
+
+ if c.tsc == nil {
+ return errShutdown
+ }
+
+ ctx, cancel := c.exportContext(ctx)
+ defer cancel()
+
+ return c.requestFunc(ctx, func(iCtx context.Context) error {
+ resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
+ ResourceSpans: protoSpans,
+ })
+ if resp != nil && resp.PartialSuccess != nil {
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedSpans()
+ if n != 0 || msg != "" {
+ err := internal.TracePartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ // nil is converted to OK.
+ if status.Code(err) == codes.OK {
+ // Success.
+ return nil
+ }
+ return err
+ })
+}
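+
+// Illustrative note: a partial success response is not returned as an error
+// from UploadTraces; it is forwarded to the global OTel error handler and
+// the export itself is treated as successful.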
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+
+ if c.exportTimeout > 0 {
+ ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(parent)
+ }
+
+ if c.metadata.Len() > 0 {
+ ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+ }
+
+ // Unify the client stopCtx with the parent.
+ go func() {
+ select {
+ case <-ctx.Done():
+ case <-c.stopCtx.Done():
+ // Cancel the export as the shutdown has timed out.
+ cancel()
+ }
+ }()
+
+ return ctx, cancel
+}
+
+// retryable returns whether err identifies a request that can be retried and
+// a duration to wait if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+ s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+ switch s.Code() {
+ case codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.Aborted,
+ codes.OutOfRange,
+ codes.Unavailable,
+ codes.DataLoss:
+ // Additionally handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that the recovery from resource exhaustion is possible.
+ return throttleDelay(s)
+ }
+
+ // Not a retry-able error.
+ return false, 0
+}
+
+// throttleDelay returns whether the status includes a RetryInfo detail and,
+// if so, the duration to wait before retrying.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+ for _, detail := range s.Details() {
+ if t, ok := detail.(*errdetails.RetryInfo); ok {
+ return true, t.RetryDelay.AsDuration()
+ }
+ }
+ return false, 0
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *client) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Endpoint string
+ }{
+ Type: "otlptracegrpc",
+ Endpoint: c.endpoint,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 0000000000..e783b57ac4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally contain a port, a scheme, and a path.
+The value accepts "http" and "https" scheme.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+A scheme set via OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+takes precedence over this setting.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
new file mode 100644
index 0000000000..b826b84247
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
+ return otlptrace.New(ctx, NewClient(opts...))
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(opts ...Option) *otlptrace.Exporter {
+ return otlptrace.NewUnstarted(NewClient(opts...))
+}
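+
+// A minimal wiring sketch (illustrative only; sdktrace and otel refer to
+// go.opentelemetry.io/otel/sdk/trace and go.opentelemetry.io/otel, which are
+// not imported by this file):
+//
+//	exp, err := New(ctx, WithInsecure(), WithEndpoint("localhost:4317"))
+//	if err != nil {
+//		// handle error
+//	}
+//	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+//	otel.SetTracerProvider(tp)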
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000000..9513c0a57c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,191 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
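+
+// For example (illustrative): with Namespace set to "OTEL_EXPORTER_OTLP",
+// GetEnvValue("TRACES_TIMEOUT") looks up OTEL_EXPORTER_OTLP_TRACES_TIMEOUT
+// and reports false when the variable is unset or blank.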
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, the file is parsed as a crypto/x509.CertPool, which is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If both exist, they are parsed as a crypto/tls.Certificate and the result is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '='"), "parse headers", "input", header)
+ continue
+ }
+ name, err := url.PathUnescape(n)
+ if err != nil {
+ global.Error(err, "escape header key", "key", n)
+ continue
+ }
+ trimmedName := strings.TrimSpace(name)
+ value, err := url.PathUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
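+
+// For example (illustrative): stringToHeader("key1=value1,key2=value2")
+// returns map[string]string{"key1": "value1", "key2": "value2"}; pairs
+// missing '=' are skipped and reported to the global error handler.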
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
new file mode 100644
index 0000000000..97cd6c54f7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
new file mode 100644
index 0000000000..7bb189a94b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
@@ -0,0 +1,142 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
+)
+
+// DefaultEnvOptionsReader is the default reader of OTLP environment variables.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and the signals are sent to these paths relative to that.
+ cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if an URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Traces.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ )
+
+ return opts
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
new file mode 100644
index 0000000000..8f84a79963
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -0,0 +1,353 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+const (
+ // DefaultTracesPath is the default URL path for the endpoint that
+ // receives spans.
+ DefaultTracesPath string = "/v1/traces"
+ // DefaultTimeout is the default maximum time to wait for the backend to
+ // process each span batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+ // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
+ HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+ SignalConfig struct {
+ Endpoint string
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ Proxy HTTPTransportProxyFunc
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Traces SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Traces: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultTracesPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+ return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = fmt.Sprintf("/%s", tmp)
+ }
+ return tmp
+}
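+
+// For example (illustrative): cleanPath(" /v1/traces/ ", "/v1/traces")
+// returns "/v1/traces", cleanPath("", "/v1/traces") returns "/v1/traces",
+// and cleanPath("traces", "/v1/traces") returns "/traces".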
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+ cfg := Config{
+ Traces: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultTracesPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+ // Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Traces.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+ } else if cfg.Traces.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Traces.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Traces.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+// WithEndpoint configures the trace host and port only; endpoint should
+// resemble "example.com" or "localhost:4317". To configure the scheme and path,
+// use WithEndpointURL.
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = endpoint
+ return cfg
+ })
+}
+
+// WithEndpointURL configures the trace scheme, host, port, and path; the
+// provided value should resemble "https://example.com:4318/v1/traces".
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlptrace: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Traces.Endpoint = u.Host
+ cfg.Traces.URLPath = u.Path
+ if u.Scheme != "https" {
+ cfg.Traces.Insecure = true
+ }
+
+ return cfg
+ })
+}
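+
+// For example (illustrative): WithEndpointURL("http://localhost:4318/v1/traces")
+// sets Traces.Endpoint to "localhost:4318", Traces.URLPath to "/v1/traces",
+// and Traces.Insecure to true because the scheme is not "https".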
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Traces.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Timeout = duration
+ return cfg
+ })
+}
+
+func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Proxy = pf
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
new file mode 100644
index 0000000000..3d4f699d47
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,40 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+ // MarshalProto tells the driver to send using the protobuf binary format.
+ MarshalProto Marshaler = iota
+ // MarshalJSON tells the driver to send using json format.
+ MarshalJSON
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
new file mode 100644
index 0000000000..38b97a0131
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -0,0 +1,26 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+)
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
new file mode 100644
index 0000000000..a12ea4c48e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+ _, ok := err.(PartialSuccess)
+ return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "metric data points",
+ }
+}
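+
+// Usage sketch (illustrative): an error handler registered with
+// otel.SetErrorHandler can detect these errors with
+//
+//	if errors.Is(err, PartialSuccess{}) {
+//		// inspect err.(PartialSuccess).RejectedItems and .RejectedKind
+//	}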
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
new file mode 100644
index 0000000000..4f2113ae2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether to retry sending batches in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval is the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retry-able and whether an explicit
+// throttle duration included in the error should be honored.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %s", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
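
To make the contract above concrete, here is a minimal sketch of a request wrapped by a RequestFunc. It is illustrative only: the retry package is internal to the exporter and not importable by user code, and the sentinel error, the evaluate function, and the small intervals are invented for the example.

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
    )

    var errTransient = errors.New("transient export failure")

    func main() {
        // Treat only errTransient as retryable; a zero Duration means the
        // error carried no explicit throttle hint.
        evaluate := func(err error) (bool, time.Duration) {
            return errors.Is(err, errTransient), 0
        }

        cfg := retry.Config{
            Enabled:         true,
            InitialInterval: 10 * time.Millisecond,
            MaxInterval:     50 * time.Millisecond,
            MaxElapsedTime:  time.Second,
        }

        attempts := 0
        send := cfg.RequestFunc(evaluate)
        err := send(context.Background(), func(ctx context.Context) error {
            attempts++
            if attempts < 3 {
                return errTransient // retried after exponential backoff
            }
            return nil
        })
        fmt.Println(attempts, err) // 3 <nil>
    }
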
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
new file mode 100644
index 0000000000..bbad0e6d01
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -0,0 +1,208 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+)
+
+// Option applies an option to the gRPC driver.
+type Option interface {
+ applyGRPCOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
+ converted := make([]otlpconfig.GRPCOption, len(opts))
+ for i, o := range opts {
+ converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying export of span batches that
+// failed to be received by the target endpoint.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ otlpconfig.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
+ return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC
+// connection just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
+// default, client security is required unless WithInsecure is used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+ return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint (host and port) the Exporter will
+// connect to. The provided endpoint should resemble "example.com:4317" (no
+// scheme or path).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL (scheme, host, port, path)
+// the Exporter will connect to. The provided endpoint URL should resemble
+// "https://example.com:4318/v1/traces".
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "https://localhost:4317/v1/traces" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.ReconnectionPeriod = rp
+ return cfg
+ })}
+}
+
+func compressorToCompression(compressor string) otlpconfig.Compression {
+ if compressor == "gzip" {
+ return otlpconfig.GzipCompression
+ }
+
+ otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+ return otlpconfig.NoCompression
+}
+
+// WithCompressor sets the compressor for the gRPC client to use when sending
+// requests. Supported compressor values: "gzip".
+func WithCompressor(compressor string) Option {
+ return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTLSCredentials allows the connection to use TLS credentials when
+// talking to the server. It takes grpc.TransportCredentials instead of, say,
+// a certificate file or a tls.Certificate, because these credentials can be
+// retrieved in many ways (e.g. a plain file, an in-code tls.Config, or
+// certificate rotation), so it is up to the caller to decide what to use.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.Traces.GRPCCredentials = creds
+ return cfg
+ })}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.ServiceConfig = serviceConfig
+ return cfg
+ })}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when making a
+// connection. The options here are appended to the internal grpc.DialOptions
+// used, so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.DialOptions = opts
+ return cfg
+ })}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// such options that are passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The client
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.GRPCConn = conn
+ return cfg
+ })}
+}
+
+// WithTimeout sets the maximum amount of time a client will attempt to
+// export a batch of spans. This takes precedence over any retry settings
+// defined with WithRetry; once this time limit has been reached, the export
+// is abandoned and the batch of spans is dropped.
+//
+// If unset, the default timeout will be set to 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that may be
+// returned by the target endpoint when exporting a batch of spans.
+//
+// If the target endpoint responds with not only a retryable error, but also
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase the interval
+// exponentially after each error, for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+ return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
+}
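
Putting these options together, a minimal sketch of constructing an exporter follows. It assumes the package's New constructor, which is not part of this diff; the endpoint and retry values are illustrative.

    package main

    import (
        "context"
        "log"
        "time"

        "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    )

    func main() {
        ctx := context.Background()

        // Plaintext connection to a local collector, a 5s per-batch timeout,
        // and a bounded retry policy assembled from the options defined above.
        exp, err := otlptracegrpc.New(ctx,
            otlptracegrpc.WithEndpoint("localhost:4317"),
            otlptracegrpc.WithInsecure(),
            otlptracegrpc.WithTimeout(5*time.Second),
            otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{
                Enabled:         true,
                InitialInterval: time.Second,
                MaxInterval:     10 * time.Second,
                MaxElapsedTime:  30 * time.Second,
            }),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer func() { _ = exp.Shutdown(ctx) }()
    }
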
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
new file mode 100644
index 0000000000..fc7190d940
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
+func Version() string {
+ return "1.27.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
index 0c8ed20a59..3a0cc42f6a 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -281,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco
}
}
+type sfGauge struct {
+ embedded.Float64Gauge
+
+ name string
+ opts []metric.Float64GaugeOption
+
+ delegate atomic.Value // metric.Float64Gauge
+}
+
+var _ metric.Float64Gauge = (*sfGauge)(nil)
+
+func (i *sfGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Gauge).Record(ctx, x, opts...)
+ }
+}
+
type siCounter struct {
embedded.Int64Counter
@@ -358,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record
ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
}
}
+
+type siGauge struct {
+ embedded.Int64Gauge
+
+ name string
+ opts []metric.Int64GaugeOption
+
+ delegate atomic.Value // metric.Int64Gauge
+}
+
+var _ metric.Int64Gauge = (*siGauge)(nil)
+
+func (i *siGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
+ }
+}
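
In miniature, the delegate pattern used by these instruments looks like the sketch below (a simplified stand-in, not the library's code): reads go through an atomic.Value so recording stays lock-free, and calls made before a delegate exists are silently dropped.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // gauge mirrors the sfGauge/siGauge pattern above: measurements are
    // dropped until a real implementation is stored, after which every call
    // is forwarded without locking on the hot path.
    type gauge struct {
        delegate atomic.Value // holds a func(float64)
    }

    func (g *gauge) Record(v float64) {
        if f := g.delegate.Load(); f != nil {
            f.(func(float64))(v)
        }
    }

    func main() {
        g := &gauge{}
        g.Record(1) // no delegate yet: silently dropped

        g.delegate.Store(func(v float64) { fmt.Println("recorded", v) })
        g.Record(2) // forwarded: prints "recorded 2"
    }
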
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index f21898591e..590fa7385f 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -164,6 +164,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti
return i, nil
}
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Int64Gauge(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &siGauge{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64ObservableCounter(name, options...)
@@ -230,6 +241,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram
return i, nil
}
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64Gauge(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &sfGauge{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64ObservableCounter(name, options...)
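
Seen from user code, the effect of this delegation is sketched below. It assumes an SDK MeterProvider from a matching release (one that implements the new synchronous gauges) is eventually installed; the meter and instrument names are illustrative.

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel"
        sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    )

    func main() {
        ctx := context.Background()

        // Created against the still-unconfigured global MeterProvider;
        // recordings are dropped until a delegate is installed.
        gauge, err := otel.Meter("example").Int64Gauge("queue.depth")
        if err != nil {
            log.Fatal(err)
        }
        gauge.Record(ctx, 1) // no-op for now

        // Installing a real provider swaps the delegate into the gauge
        // created above; subsequent recordings are forwarded to the SDK.
        provider := sdkmetric.NewMeterProvider()
        otel.SetMeterProvider(provider)
        defer func() { _ = provider.Shutdown(ctx) }()

        gauge.Record(ctx, 2) // recorded by the SDK
    }
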
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
index c7234f4bc8..cf23db7780 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -39,7 +39,7 @@ type Float64ObservableCounter interface {
}
// Float64ObservableCounterConfig contains options for asynchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64ObservableCounterConfig struct {
description string
unit string
@@ -97,7 +97,7 @@ type Float64ObservableUpDownCounter interface {
}
// Float64ObservableUpDownCounterConfig contains options for asynchronous
-// counter instruments that record int64 values.
+// counter instruments that record float64 values.
type Float64ObservableUpDownCounterConfig struct {
description string
unit string
@@ -154,7 +154,7 @@ type Float64ObservableGauge interface {
}
// Float64ObservableGaugeConfig contains options for asynchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64ObservableGaugeConfig struct {
description string
unit string
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
index 15bebae084..1a9dc68093 100644
--- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -102,6 +102,16 @@ type Float64Counter interface{ float64Counter() }
// the API package).
type Float64Histogram interface{ float64Histogram() }
+// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64Gauge interface{ float64Gauge() }
+
// Float64ObservableCounter is embedded in
// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
//
@@ -174,6 +184,16 @@ type Int64Counter interface{ int64Counter() }
// the API package).
type Int64Histogram interface{ int64Histogram() }
+// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience
+// a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64Gauge interface{ int64Gauge() }
+
// Int64ObservableCounter is embedded in
// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
//
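
A toy implementation showing why an author would embed these interfaces follows; the lastValueGauge type is invented for illustration. If metric.Float64Gauge later gains a method, this type stops compiling instead of silently misbehaving.

    package main

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel/metric"
        "go.opentelemetry.io/otel/metric/embedded"
    )

    // lastValueGauge is a toy metric.Float64Gauge that remembers the last
    // recorded value. Embedding embedded.Float64Gauge opts this type in to
    // compile-time breakage should the API interface ever be extended.
    type lastValueGauge struct {
        embedded.Float64Gauge

        last float64
    }

    var _ metric.Float64Gauge = (*lastValueGauge)(nil)

    func (g *lastValueGauge) Record(_ context.Context, v float64, _ ...metric.RecordOption) {
        g.last = v
    }

    func main() {
        g := &lastValueGauge{}
        g.Record(context.Background(), 42.5)
        fmt.Println(g.last) // 42.5
    }
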
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
index 451413192a..ea52e40233 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -16,6 +16,7 @@ type InstrumentOption interface {
Int64CounterOption
Int64UpDownCounterOption
Int64HistogramOption
+ Int64GaugeOption
Int64ObservableCounterOption
Int64ObservableUpDownCounterOption
Int64ObservableGaugeOption
@@ -23,6 +24,7 @@ type InstrumentOption interface {
Float64CounterOption
Float64UpDownCounterOption
Float64HistogramOption
+ Float64GaugeOption
Float64ObservableCounterOption
Float64ObservableUpDownCounterOption
Float64ObservableGaugeOption
@@ -51,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra
return c
}
+func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
c.description = string(o)
return c
@@ -81,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi
return c
}
+func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
c.description = string(o)
return c
@@ -116,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra
return c
}
+func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
c.unit = string(o)
return c
@@ -146,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi
return c
}
+func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
c.unit = string(o)
return c
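
These applyInt64Gauge/applyFloat64Gauge methods are what let the shared description and unit options work on the new gauge configs, as in this small sketch (instrument name and unit are illustrative):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        // WithDescription and WithUnit build InstrumentOption values, which
        // now also satisfy Int64GaugeOption and Float64GaugeOption through
        // the methods added above.
        cfg := metric.NewInt64GaugeConfig(
            metric.WithDescription("current queue depth"),
            metric.WithUnit("{item}"),
        )
        fmt.Println(cfg.Description(), cfg.Unit()) // current queue depth {item}
    }
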
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 7aa82e0c17..460b3f9b08 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -58,6 +58,10 @@ type Meter interface {
// synchronously record the distribution of int64 measurements during a
// computational operation.
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+ // Int64Gauge returns a new Int64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous int64 measurements during a computational operation.
+ Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
// Int64ObservableCounter returns a new Int64ObservableCounter identified
// by name and configured with options. The instrument is used to
// asynchronously record increasing int64 measurements once per a
@@ -104,6 +108,10 @@ type Meter interface {
// synchronously record the distribution of float64 measurements during a
// computational operation.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+ // Float64Gauge returns a new Float64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous float64 measurements during a computational operation.
+ Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
// Float64ObservableCounter returns a new Float64ObservableCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record increasing float64
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 0000000000..bb89694356
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 0000000000..ca6fcbdc09
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,281 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ metric.MeterProvider = MeterProvider{}
+ _ metric.Meter = Meter{}
+ _ metric.Observer = Observer{}
+ _ metric.Registration = Registration{}
+ _ metric.Int64Counter = Int64Counter{}
+ _ metric.Float64Counter = Float64Counter{}
+ _ metric.Int64UpDownCounter = Int64UpDownCounter{}
+ _ metric.Float64UpDownCounter = Float64UpDownCounter{}
+ _ metric.Int64Histogram = Int64Histogram{}
+ _ metric.Float64Histogram = Float64Histogram{}
+ _ metric.Int64Gauge = Int64Gauge{}
+ _ metric.Float64Gauge = Float64Gauge{}
+ _ metric.Int64ObservableCounter = Int64ObservableCounter{}
+ _ metric.Float64ObservableCounter = Float64ObservableCounter{}
+ _ metric.Int64ObservableGauge = Int64ObservableGauge{}
+ _ metric.Float64ObservableGauge = Float64ObservableGauge{}
+ _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
+ _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+ _ metric.Int64Observer = Int64Observer{}
+ _ metric.Float64Observer = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+ return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+ return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+ return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+ return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+ return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+ return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+ return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+ return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback, performing no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including holding any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
+// measurements. It produces no telemetry.
+type Int64Gauge struct{ embedded.Int64Gauge }
+
+// Record performs no operation.
+func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
+// measurements. It produces no telemetry.
+type Float64Gauge struct{ embedded.Float64Gauge }
+
+// Record performs no operation.
+func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+ metric.Int64Observable
+ embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+ metric.Float64Observable
+ embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
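
As a usage sketch, metrics can be disabled outright by installing this provider globally; the meter and instrument names below are illustrative. The same types can instead be embedded, as the package doc describes, to default unimplemented methods of a partial implementation to no-ops.

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric/noop"
    )

    func main() {
        // Explicitly disable metrics: every instrument handed out by this
        // provider is a no-op, so instrumented code runs unchanged but
        // emits nothing.
        otel.SetMeterProvider(noop.NewMeterProvider())

        g, _ := otel.Meter("example").Float64Gauge("cpu.utilization")
        g.Record(context.Background(), 0.42) // does nothing
    }
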
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
index 5420d546eb..8403a4bad2 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -28,7 +28,7 @@ type Float64Counter interface {
}
// Float64CounterConfig contains options for synchronous counter instruments that
-// record int64 values.
+// record float64 values.
type Float64CounterConfig struct {
description string
unit string
@@ -81,7 +81,7 @@ type Float64UpDownCounter interface {
}
// Float64UpDownCounterConfig contains options for synchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64UpDownCounterConfig struct {
description string
unit string
@@ -133,8 +133,8 @@ type Float64Histogram interface {
Record(ctx context.Context, incr float64, options ...RecordOption)
}
-// Float64HistogramConfig contains options for synchronous counter instruments
-// that record int64 values.
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
type Float64HistogramConfig struct {
description string
unit string
@@ -172,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
type Float64HistogramOption interface {
applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
}
+
+// Float64Gauge is an instrument that records instantaneous float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value float64, options ...RecordOption)
+}
+
+// Float64GaugeConfig contains options for synchronous gauge instruments that
+// record float64 values.
+type Float64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts
+// applied.
+func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig {
+ var config Float64GaugeConfig
+ for _, o := range opts {
+ config = o.applyFloat64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Float64GaugeOption applies options to a [Float64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64GaugeOption.
+type Float64GaugeOption interface {
+ applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig
+}
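
A short usage sketch of the new synchronous gauge follows, assuming a configured global MeterProvider (with the default no-op global, the Record call emits nothing); the instrument and attribute names are illustrative.

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        ctx := context.Background()

        // A synchronous gauge records the current value at the moment of the
        // call, unlike a histogram, which aggregates a distribution.
        temp, err := otel.Meter("example").Float64Gauge("room.temperature",
            metric.WithDescription("Current room temperature"),
            metric.WithUnit("Cel"),
        )
        if err != nil {
            log.Fatal(err)
        }

        // WithAttributeSet avoids rebuilding the set on every call;
        // WithAttributes is the more convenient form when performance is
        // not a concern.
        attrs := attribute.NewSet(attribute.String("room", "server-1"))
        temp.Record(ctx, 21.5, metric.WithAttributeSet(attrs))
    }
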
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
index 0dcbf06db9..783fdfba77 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -133,7 +133,7 @@ type Int64Histogram interface {
Record(ctx context.Context, incr int64, options ...RecordOption)
}
-// Int64HistogramConfig contains options for synchronous counter instruments
+// Int64HistogramConfig contains options for synchronous histogram instruments
// that record int64 values.
type Int64HistogramConfig struct {
description string
@@ -172,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
type Int64HistogramOption interface {
applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
}
+
+// Int64Gauge is an instrument that records instantaneous int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value int64, options ...RecordOption)
+}
+
+// Int64GaugeConfig contains options for synchronous gauge instruments that
+// record int64 values.
+type Int64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts
+// applied.
+func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig {
+ var config Int64GaugeConfig
+ for _, o := range opts {
+ config = o.applyInt64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Int64GaugeOption applies options to an [Int64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64GaugeOption.
+type Int64GaugeOption interface {
+ applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
new file mode 100644
index 0000000000..8c5ac55ca9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -0,0 +1,24 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:recommended"
+ ],
+ "ignorePaths": [],
+ "labels": ["Skip Changelog", "dependencies"],
+ "postUpdateOptions" : [
+ "gomodTidy"
+ ],
+ "packageRules": [
+ {
+ "matchManagers": ["gomod"],
+ "matchDepTypes": ["indirect"],
+ "enabled": true
+ },
+ {
+ "matchFileNames": ["internal/tools/**"],
+ "matchManagers": ["gomod"],
+ "matchDepTypes": ["indirect"],
+ "enabled": false
+ }
+ ]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md
new file mode 100644
index 0000000000..f81b1576ad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/README.md
@@ -0,0 +1,3 @@
+# SDK
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
new file mode 100644
index 0000000000..06e6d86854
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
@@ -0,0 +1,3 @@
+# SDK Instrumentation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
new file mode 100644
index 0000000000..a4faa6a03d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package instrumentation provides types to represent the code libraries that
+// provide OpenTelemetry instrumentation. These types are used in the
+// OpenTelemetry signal pipelines to identify the source of telemetry.
+//
+// See
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
+// and
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
+// for more information.
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
new file mode 100644
index 0000000000..f4d1857c4f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Library represents the instrumentation library.
+// Deprecated: please use Scope instead.
+type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
new file mode 100644
index 0000000000..728115045b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Scope represents the instrumentation scope.
+type Scope struct {
+ // Name is the name of the instrumentation scope. This should be the
+ // Go package name of that scope.
+ Name string
+ // Version is the version of the instrumentation scope.
+ Version string
+ // SchemaURL of the telemetry emitted by the scope.
+ SchemaURL string
+}
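
A Scope is plain data identifying the source of telemetry; a sketch with purely illustrative values:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/sdk/instrumentation"
    )

    func main() {
        // A real scope names the instrumenting Go package.
        scope := instrumentation.Scope{
            Name:      "example.com/myapp/telemetry",
            Version:   "0.1.0",
            SchemaURL: "https://opentelemetry.io/schemas/1.24.0",
        }
        fmt.Printf("%s@%s\n", scope.Name, scope.Version)
    }
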
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
new file mode 100644
index 0000000000..07923ed8d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
@@ -0,0 +1,166 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package env // import "go.opentelemetry.io/otel/sdk/internal/env"
+
+import (
+ "os"
+ "strconv"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+ // BatchSpanProcessorScheduleDelayKey is the delay interval between two
+ // consecutive exports, in milliseconds (default: 5000).
+ BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY"
+ // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to
+ // export data, in milliseconds (default: 3000).
+ BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT"
+ // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (default: 2048).
+ BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
+ // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size
+ // (default: 512). Note: it must be less than or equal to
+ // BatchSpanProcessorMaxQueueSize.
+ BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
+
+ // AttributeValueLengthKey is the maximum allowed attribute value size.
+ AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+ // AttributeCountKey is the maximum allowed span attribute count.
+ AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanAttributeValueLengthKey is the maximum allowed attribute value size
+ // for a span.
+ SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+ // SpanAttributeCountKey is the maximum allowed span attribute count for a
+ // span.
+ SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanEventCountKey is the maximum allowed span event count.
+ SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT"
+
+ // SpanEventAttributeCountKey is the maximum allowed attribute per span
+ // event count.
+ SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanLinkCountKey is the maximum allowed span link count.
+ SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT"
+
+ // SpanLinkAttributeCountKey is the maximum allowed attribute per span
+ // link count.
+ SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
+)
+
+// firstInt returns the value of the first matching environment variable from
+// keys. If the value is not an integer or no match is found, defaultValue is
+// returned.
+func firstInt(defaultValue int, keys ...string) int {
+ for _, key := range keys {
+ value := os.Getenv(key)
+ if value == "" {
+ continue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ global.Info("Got invalid value, number value expected.", key, value)
+ return defaultValue
+ }
+
+ return intValue
+ }
+
+ return defaultValue
+}
+
+// IntEnvOr returns the int value of the environment variable with name key if
+// it exists, is not empty, and parses as an int. Otherwise, defaultValue is returned.
+func IntEnvOr(key string, defaultValue int) int {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ global.Info("Got invalid value, number value expected.", key, value)
+ return defaultValue
+ }
+
+ return intValue
+}
+
+// BatchSpanProcessorScheduleDelay returns the environment variable value for
+// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorScheduleDelay(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue)
+}
+
+// BatchSpanProcessorExportTimeout returns the environment variable value for
+// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorExportTimeout(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxQueueSize returns the environment variable value for
+// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorMaxQueueSize(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for
+// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
+// is returned.
+func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue)
+}
+
+// SpanAttributeValueLength returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is
+// returned or defaultValue if that is not set.
+func SpanAttributeValueLength(defaultValue int) int {
+ return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey)
+}
+
+// SpanAttributeCount returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or
+// defaultValue if that is not set.
+func SpanAttributeCount(defaultValue int) int {
+ return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey)
+}
+
+// SpanEventCount returns the environment variable value for the
+// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanEventCount(defaultValue int) int {
+ return IntEnvOr(SpanEventCountKey, defaultValue)
+}
+
+// SpanEventAttributeCount returns the environment variable value for the
+// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue
+// is returned.
+func SpanEventAttributeCount(defaultValue int) int {
+ return IntEnvOr(SpanEventAttributeCountKey, defaultValue)
+}
+
+// SpanLinkCount returns the environment variable value for the
+// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkCount(defaultValue int) int {
+ return IntEnvOr(SpanLinkCountKey, defaultValue)
+}
+
+// SpanLinkAttributeCount returns the environment variable value for the
+// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkAttributeCount(defaultValue int) int {
+ return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
+}
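+
+// Illustrative usage (hypothetical environment values, not part of the
+// upstream code):
+//
+//	// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT=64 OTEL_ATTRIBUTE_COUNT_LIMIT=32
+//	spanAttrs := env.SpanAttributeCount(128) // 64: the span-specific key wins
+//	links := env.SpanLinkCount(128)          // 128: OTEL_SPAN_LINK_COUNT_LIMIT unset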
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
new file mode 100644
index 0000000000..1fc19d3fe3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/sdk/internal"
+
+//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
+//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
+//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
+
+//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
+//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
new file mode 100644
index 0000000000..a990092f9d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/sdk/internal"
+
+import "time"
+
+// MonotonicEndTime returns the current end time, expressed as an offset from
+// start so that it carries start's monotonic clock reading.
+//
+// time.Since(start) is computed with the monotonic clock; adding that
+// duration back to start therefore yields an end time that is monotonic
+// with respect to start.
+// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func MonotonicEndTime(start time.Time) time.Time {
+ return start.Add(time.Since(start))
+}
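+
+// Illustrative usage (hypothetical caller, not part of the upstream code),
+// as might be used when ending a span:
+//
+//	s.endTime = internal.MonotonicEndTime(s.startTime)
+//
+// Because time.Since reads the monotonic clock, the resulting end time is
+// unaffected by wall-clock adjustments made between start and end.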
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md
new file mode 100644
index 0000000000..017f072a51
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md
@@ -0,0 +1,3 @@
+# Metric SDK
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
new file mode 100644
index 0000000000..e6f5cfb2ad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "errors"
+ "fmt"
+ "slices"
+)
+
+// errAgg is wrapped by misconfigured aggregations.
+var errAgg = errors.New("aggregation")
+
+// Aggregation is the aggregation used to summarize recorded measurements.
+type Aggregation interface {
+ // copy returns a deep copy of the Aggregation.
+ copy() Aggregation
+
+ // err returns an error for any misconfigured Aggregation.
+ err() error
+}
+
+// AggregationDrop is an Aggregation that drops all recorded data.
+type AggregationDrop struct{} // AggregationDrop has no parameters.
+
+var _ Aggregation = AggregationDrop{}
+
+// copy returns a deep copy of d.
+func (d AggregationDrop) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A drop aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDrop) err() error { return nil }
+
+// AggregationDefault is an Aggregation that uses the default instrument kind selection
+// mapping to select another Aggregation. A metric reader can be configured to
+// make an aggregation selection based on instrument kind that differs from
+// the default. This Aggregation ensures the default is used.
+//
+// See the [DefaultAggregationSelector] for information about the default
+// instrument kind selection mapping.
+type AggregationDefault struct{} // AggregationDefault has no parameters.
+
+var _ Aggregation = AggregationDefault{}
+
+// copy returns a deep copy of d.
+func (d AggregationDefault) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A default aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDefault) err() error { return nil }
+
+// AggregationSum is an Aggregation that summarizes a set of measurements as their
+// arithmetic sum.
+type AggregationSum struct{} // AggregationSum has no parameters.
+
+var _ Aggregation = AggregationSum{}
+
+// copy returns a deep copy of s.
+func (s AggregationSum) copy() Aggregation { return s }
+
+// err returns an error for any misconfiguration. A sum aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationSum) err() error { return nil }
+
+// AggregationLastValue is an Aggregation that summarizes a set of measurements as the
+// last one made.
+type AggregationLastValue struct{} // AggregationLastValue has no parameters.
+
+var _ Aggregation = AggregationLastValue{}
+
+// copy returns a deep copy of l.
+func (l AggregationLastValue) copy() Aggregation { return l }
+
+// err returns an error for any misconfiguration. A last-value aggregation has
+// no parameters and cannot be misconfigured, therefore this always returns
+// nil.
+func (AggregationLastValue) err() error { return nil }
+
+// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set
+// of measurements as a histogram with explicitly defined buckets.
+type AggregationExplicitBucketHistogram struct {
+ // Boundaries are the increasing bucket boundary values. Boundary values
+ // define bucket upper bounds. Buckets are exclusive of their lower
+ // boundary and inclusive of their upper bound (except at positive
+ // infinity). A measurement falls into the lowest-numbered bucket whose
+ // upper boundary is greater than or equal to the measurement. As an
+ // example, boundaries defined as:
+ //
+ // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
+ //
+ // will define these buckets:
+ //
+ // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
+ // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
+ // (500.0, 1000.0], (1000.0, +∞)
+ Boundaries []float64
+ // NoMinMax indicates whether the min and max of the distribution should
+ // not be recorded. By default, these extrema are recorded.
+ //
+ // Recording these extrema for cumulative data is expected to have little
+ // value: they represent the entire life of the instrument instead of just
+ // the current collection cycle. It is recommended to set this to true for
+ // that type of data to avoid computing the low-value extrema.
+ NoMinMax bool
+}
+
+var _ Aggregation = AggregationExplicitBucketHistogram{}
+
+// errHist is returned by misconfigured ExplicitBucketHistograms.
+var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
+
+// err returns an error for any misconfiguration.
+func (h AggregationExplicitBucketHistogram) err() error {
+ if len(h.Boundaries) <= 1 {
+ return nil
+ }
+
+ // Check boundaries are monotonic.
+ i := h.Boundaries[0]
+ for _, j := range h.Boundaries[1:] {
+ if i >= j {
+ return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
+ }
+ i = j
+ }
+
+ return nil
+}
+
+// copy returns a deep copy of h.
+func (h AggregationExplicitBucketHistogram) copy() Aggregation {
+ return AggregationExplicitBucketHistogram{
+ Boundaries: slices.Clone(h.Boundaries),
+ NoMinMax: h.NoMinMax,
+ }
+}
+
+// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set
+// of measurements as a histogram with bucket widths that grow exponentially.
+type AggregationBase2ExponentialHistogram struct {
+ // MaxSize is the maximum number of buckets to use for the histogram.
+ MaxSize int32
+ // MaxScale is the maximum resolution scale to use for the histogram.
+ //
+ // MaxScale has a maximum value of 20. Using a value of 20 means the
+ // maximum number of buckets that can fit within the range of a
+ // signed 32-bit integer index could be used.
+ //
+ // MaxScale has a minimum value of -10. Using a value of -10 means only
+ // two buckets will be used.
+ MaxScale int32
+
+ // NoMinMax indicates whether the min and max of the distribution should
+ // not be recorded. By default, these extrema are recorded.
+ //
+ // Recording these extrema for cumulative data is expected to have little
+ // value: they represent the entire life of the instrument instead of just
+ // the current collection cycle. It is recommended to set this to true for
+ // that type of data to avoid computing the low-value extrema.
+ NoMinMax bool
+}
+
+var _ Aggregation = AggregationBase2ExponentialHistogram{}
+
+// copy returns a deep copy of the Aggregation.
+func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
+ return e
+}
+
+const (
+ expoMaxScale = 20
+ expoMinScale = -10
+)
+
+// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
+var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
+
+// err returns an error for any misconfigured Aggregation.
+func (e AggregationBase2ExponentialHistogram) err() error {
+ if e.MaxScale > expoMaxScale {
+ return fmt.Errorf("%w: max size %d is greater than maximum scale %d", errExpoHist, e.MaxSize, expoMaxScale)
+ }
+ if e.MaxSize <= 0 {
+ return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
+ }
+ return nil
+}
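+
+// Illustrative sketch from a user's perspective (assumed instrument name,
+// not part of the upstream code): selecting an explicit bucket histogram
+// through a View. A misconfiguration such as non-monotonic boundaries
+// surfaces via err() when the pipeline is built.
+//
+//	view := metric.NewView(
+//		metric.Instrument{Name: "request.latency"},
+//		metric.Stream{Aggregation: metric.AggregationExplicitBucketHistogram{
+//			Boundaries: []float64{0, 5, 10, 25, 50, 100, 250, 500, 1000},
+//		}},
+//	)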
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
new file mode 100644
index 0000000000..63b88f0866
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "sync"
+)
+
+// cache is a locking storage used to quickly return already computed values.
+//
+// The zero value of a cache is empty and ready to use.
+//
+// A cache must not be copied after first use.
+//
+// All methods of a cache are safe to call concurrently.
+type cache[K comparable, V any] struct {
+ sync.Mutex
+ data map[K]V
+}
+
+// Lookup returns the value stored in the cache with the associated key if it
+// exists. Otherwise, f is called and its returned value is set in the cache
+// for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cache lock, so f
+// should not block excessively.
+func (c *cache[K, V]) Lookup(key K, f func() V) V {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.data == nil {
+ val := f()
+ c.data = map[K]V{key: val}
+ return val
+ }
+ if v, ok := c.data[key]; ok {
+ return v
+ }
+ val := f()
+ c.data[key] = val
+ return val
+}
+
+// HasKey returns true if Lookup has previously been called with that key.
+//
+// HasKey is safe to call concurrently.
+func (c *cache[K, V]) HasKey(key K) bool {
+ c.Lock()
+ defer c.Unlock()
+ _, ok := c.data[key]
+ return ok
+}
+
+// cacheWithErr is a locking storage used to quickly return already computed values and an error.
+//
+// The zero value of a cacheWithErr is empty and ready to use.
+//
+// A cacheWithErr must not be copied after first use.
+//
+// All methods of a cacheWithErr are safe to call concurrently.
+type cacheWithErr[K comparable, V any] struct {
+ cache[K, valAndErr[V]]
+}
+
+type valAndErr[V any] struct {
+ val V
+ err error
+}
+
+// Lookup returns the value stored in the cacheWithErr with the associated key
+// if it exists. Otherwise, f is called and its returned value is set in the
+// cacheWithErr for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
+// should not block excessively.
+func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
+ combined := c.cache.Lookup(key, func() valAndErr[V] {
+ val, err := f()
+ return valAndErr[V]{val: val, err: err}
+ })
+ return combined.val, combined.err
+}
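+
+// Illustrative usage (hypothetical resolver type and constructor, not part
+// of the upstream code): memoizing an expensive, fallible construction per
+// key, including the error it produced.
+//
+//	var c cacheWithErr[string, *resolver]
+//	r, err := c.Lookup("default", func() (*resolver, error) {
+//		return newResolver() // runs only the first time "default" is seen
+//	})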
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
new file mode 100644
index 0000000000..9a41f94e97
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -0,0 +1,137 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// config contains configuration options for a MeterProvider.
+type config struct {
+ res *resource.Resource
+ readers []Reader
+ views []View
+}
+
+// readerSignals returns force-flush and shutdown functions for a
+// MeterProvider to call in its corresponding methods. The force-flush and
+// shutdown methods of all Readers c contains are unified into the single
+// returned functions.
+func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
+ var fFuncs, sFuncs []func(context.Context) error
+ for _, r := range c.readers {
+ sFuncs = append(sFuncs, r.Shutdown)
+ if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok {
+ fFuncs = append(fFuncs, f.ForceFlush)
+ }
+ }
+
+ return unify(fFuncs), unifyShutdown(sFuncs)
+}
+
+// unify unifies calling all of funcs into a single function call. All errors
+// returned from calls to funcs will be unified into a single error return
+// value.
+func unify(funcs []func(context.Context) error) func(context.Context) error {
+ return func(ctx context.Context) error {
+ var errs []error
+ for _, f := range funcs {
+ if err := f(ctx); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ return unifyErrors(errs)
+ }
+}
+
+// unifyErrors combines multiple errors into a single error.
+func unifyErrors(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return fmt.Errorf("%v", errs)
+ }
+}
+
+// unifyShutdown unifies calling all of funcs into a single function call
+// that runs them only on the first invocation. If called more than once, an
+// ErrReaderShutdown error is returned.
+func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
+ f := unify(funcs)
+ var once sync.Once
+ return func(ctx context.Context) error {
+ err := ErrReaderShutdown
+ once.Do(func() { err = f(ctx) })
+ return err
+ }
+}
+
+// newConfig returns a config configured with options.
+func newConfig(options []Option) config {
+ conf := config{res: resource.Default()}
+ for _, o := range options {
+ conf = o.apply(conf)
+ }
+ return conf
+}
+
+// Option applies a configuration option value to a MeterProvider.
+type Option interface {
+ apply(config) config
+}
+
+// optionFunc applies a set of options to a config.
+type optionFunc func(config) config
+
+// apply returns a config with option(s) applied.
+func (o optionFunc) apply(conf config) config {
+ return o(conf)
+}
+
+// WithResource associates a Resource with a MeterProvider. This Resource
+// represents the entity producing telemetry and is associated with all Meters
+// the MeterProvider will create.
+//
+// By default, if this Option is not used, the default Resource from the
+// go.opentelemetry.io/otel/sdk/resource package will be used.
+func WithResource(res *resource.Resource) Option {
+ return optionFunc(func(conf config) config {
+ conf.res = res
+ return conf
+ })
+}
+
+// WithReader associates Reader r with a MeterProvider.
+//
+// By default, if this option is not used, the MeterProvider will perform no
+// operations; no data will be exported without a Reader.
+func WithReader(r Reader) Option {
+ return optionFunc(func(cfg config) config {
+ if r == nil {
+ return cfg
+ }
+ cfg.readers = append(cfg.readers, r)
+ return cfg
+ })
+}
+
+// WithView associates views with a MeterProvider.
+//
+// Views are appended to existing ones in a MeterProvider if this option is
+// used multiple times.
+//
+// By default, if this option is not used, the MeterProvider will use the
+// default view.
+func WithView(views ...View) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.views = append(cfg.views, views...)
+ return cfg
+ })
+}
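+
+// Illustrative usage (assumed res, reader, and view variables, not part of
+// the upstream code): options are applied in order over the default config.
+//
+//	mp := metric.NewMeterProvider(
+//		metric.WithResource(res),
+//		metric.WithReader(reader),
+//		metric.WithView(view),
+//	)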
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
new file mode 100644
index 0000000000..4f553a5715
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package metric provides an implementation of the OpenTelemetry metrics SDK.
+//
+// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
+// about the concept of OpenTelemetry metrics and
+// https://opentelemetry.io/docs/concepts/components/ for more information
+// about OpenTelemetry SDKs.
+//
+// The entry point for the metric package is the MeterProvider. It is the
+// object that all API calls use to create Meters, instruments, and ultimately
+// make metric measurements. Also, it is an object that should be used to
+// control the life-cycle (start, flush, and shutdown) of the SDK.
+//
+// A MeterProvider needs to be configured to export the measured data; this is
+// done by configuring it with a Reader implementation (using the WithReader
+// Option). Readers take two forms: ones that push to an endpoint
+// (NewPeriodicReader), and ones that an endpoint pulls from. See
+// [go.opentelemetry.io/otel/exporters] for exporters that can be used as
+// or with these Readers.
+//
+// Each Reader, when registered with the MeterProvider, can be augmented with a
+// View. Views allow users that run OpenTelemetry instrumented code to modify
+// the generated data of that instrumentation.
+//
+// The data generated by a MeterProvider needs to include information about its
+// origin. A MeterProvider needs to be configured with a Resource, using the
+// WithResource Option, to include this information. This Resource
+// should be used to describe the unique runtime environment instrumented code
+// is being run on. That way, when multiple instances of the code are collected
+// at a single endpoint, their origin is decipherable.
+//
+// See [go.opentelemetry.io/otel/metric] for more information about
+// the metric API.
+//
+// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about
+// the experimental features.
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
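+
+// A minimal life-cycle sketch (assumed exporter value exp, not part of the
+// upstream code):
+//
+//	reader := metric.NewPeriodicReader(exp)
+//	mp := metric.NewMeterProvider(metric.WithReader(reader))
+//	defer func() { _ = mp.Shutdown(context.Background()) }()
+//	otel.SetMeterProvider(mp)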
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
new file mode 100644
index 0000000000..a6c403797f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "os"
+ "strconv"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+ // The time interval (in milliseconds) between the start of two export attempts.
+ envInterval = "OTEL_METRIC_EXPORT_INTERVAL"
+ // Maximum allowed time (in milliseconds) to export data.
+ envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT"
+)
+
+// envDuration returns an environment variable's value as a duration in
+// milliseconds if it exists, or defaultValue if the environment variable is
+// not defined or its value is not valid.
+func envDuration(key string, defaultValue time.Duration) time.Duration {
+ v := os.Getenv(key)
+ if v == "" {
+ return defaultValue
+ }
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "environment variable", key, "value", v)
+ return defaultValue
+ }
+ if d <= 0 {
+ global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v)
+ return defaultValue
+ }
+ return time.Duration(d) * time.Millisecond
+}
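+
+// Illustrative behavior (hypothetical environment values, not part of the
+// upstream code):
+//
+//	// OTEL_METRIC_EXPORT_INTERVAL=30000
+//	d := envDuration(envInterval, 60*time.Second) // 30s
+//	// OTEL_METRIC_EXPORT_TIMEOUT=-1 (non-positive: error logged, default used)
+//	t := envDuration(envTimeout, 30*time.Second) // 30s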
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
new file mode 100644
index 0000000000..c774a4684f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "os"
+ "runtime"
+ "slices"
+
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/internal/x"
+)
+
+// reservoirFunc returns the appropriately configured exemplar reservoir
+// creation func based on the passed Aggregation and user-defined
+// environment variables.
+//
+// Note: This returns nil when the experimental exemplar feature is disabled.
+// When the OTEL_METRICS_EXEMPLAR_FILTER environment variable is set to
+// always_off, a drop reservoir creation func is returned instead.
+func reservoirFunc(agg Aggregation) func() exemplar.Reservoir {
+ if !x.Exemplars.Enabled() {
+ return nil
+ }
+
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
+ resF := func() func() exemplar.Reservoir {
+ // Explicit bucket histogram aggregation with more than 1 bucket will
+ // use AlignedHistogramBucketExemplarReservoir.
+ a, ok := agg.(AggregationExplicitBucketHistogram)
+ if ok && len(a.Boundaries) > 0 {
+ cp := slices.Clone(a.Boundaries)
+ return func() exemplar.Reservoir {
+ bounds := cp
+ return exemplar.Histogram(bounds)
+ }
+ }
+
+ var n int
+ if a, ok := agg.(AggregationBase2ExponentialHistogram); ok {
+ // Base2 Exponential Histogram Aggregation SHOULD use a
+ // SimpleFixedSizeExemplarReservoir with a reservoir equal to the
+ // smaller of the maximum number of buckets configured on the
+ // aggregation or twenty (e.g. min(20, max_buckets)).
+ n = int(a.MaxSize)
+ if n > 20 {
+ n = 20
+ }
+ } else {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
+ // This Exemplar reservoir MAY take a configuration parameter for
+ // the size of the reservoir. If no size configuration is
+ // provided, the default size MAY be the number of possible
+ // concurrent threads (e.g. number of CPUs) to help reduce
+ // contention. Otherwise, a default size of 1 SHOULD be used.
+ n = runtime.NumCPU()
+ if n < 1 {
+ // Should never be the case, but be defensive.
+ n = 1
+ }
+ }
+
+ return func() exemplar.Reservoir {
+ return exemplar.FixedSize(n)
+ }
+ }
+
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
+ const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
+
+ switch os.Getenv(filterEnvKey) {
+ case "always_on":
+ return resF()
+ case "always_off":
+ return exemplar.Drop
+ case "trace_based":
+ fallthrough
+ default:
+ newR := resF()
+ return func() exemplar.Reservoir {
+ return exemplar.SampledFilter(newR())
+ }
+ }
+}
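+
+// For illustration (assuming the experimental feature gate reads
+// OTEL_GO_X_EXEMPLAR; not part of the upstream code):
+//
+//	OTEL_GO_X_EXEMPLAR=true OTEL_METRICS_EXEMPLAR_FILTER=always_on ./app
+//
+// always_off yields a drop reservoir, and any other value (including unset)
+// falls back to the trace_based sampled filter.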
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
new file mode 100644
index 0000000000..1a3cccb677
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ErrExporterShutdown is returned if Export or Shutdown is called after an
+// Exporter has been shut down.
+var ErrExporterShutdown = fmt.Errorf("exporter is shutdown")
+
+// Exporter handles the delivery of metric data to external receivers. This is
+// the final component in the metric push pipeline.
+type Exporter interface {
+ // Temporality returns the Temporality to use for an instrument kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Exporter methods.
+ Temporality(InstrumentKind) metricdata.Temporality
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Aggregation returns the Aggregation to use for an instrument kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Exporter methods.
+ Aggregation(InstrumentKind) Aggregation
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Export serializes and transmits metric data to a receiver.
+ //
+ // This is called synchronously, there is no concurrency safety
+ // requirement. Because of this, it is critical that all timeouts and
+ // cancellations of the passed context be honored.
+ //
+ // All retry logic must be contained in this function. The SDK does not
+ // implement any retry logic. All errors returned by this function are
+ // considered unrecoverable and will be reported to a configured error
+ // Handler.
+ //
+ // The passed ResourceMetrics may be reused when the call completes. If an
+ // exporter needs to hold this data after it returns, it needs to make a
+ // copy.
+ Export(context.Context, *metricdata.ResourceMetrics) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ForceFlush flushes any metric data held by an exporter.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // This method needs to be concurrent safe.
+ ForceFlush(context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown flushes all metric data held by an exporter and releases any
+ // held computational resources.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // After Shutdown is called, calls to Export will perform no operation and
+ // instead will return an error indicating the shutdown state.
+ //
+ // This method needs to be concurrent safe.
+ Shutdown(context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
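+
+// A minimal no-op Exporter sketch (illustrative only, not part of the
+// upstream code); a real exporter would serialize and transmit the data in
+// Export:
+//
+//	type noopExporter struct{}
+//
+//	func (noopExporter) Temporality(InstrumentKind) metricdata.Temporality {
+//		return metricdata.CumulativeTemporality
+//	}
+//	func (noopExporter) Aggregation(k InstrumentKind) Aggregation {
+//		return DefaultAggregationSelector(k)
+//	}
+//	func (noopExporter) Export(context.Context, *metricdata.ResourceMetrics) error { return nil }
+//	func (noopExporter) ForceFlush(context.Context) error { return nil }
+//	func (noopExporter) Shutdown(context.Context) error   { return nil }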
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
new file mode 100644
index 0000000000..f9768fd11c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -0,0 +1,350 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
+var (
+ zeroInstrumentKind InstrumentKind
+ zeroScope instrumentation.Scope
+)
+
+// InstrumentKind is the identifier of a group of instruments that all
+// perform the same function.
+type InstrumentKind uint8
+
+const (
+ // instrumentKindUndefined is an undefined instrument kind, it should not
+ // be used by any initialized type.
+ instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused
+ // InstrumentKindCounter identifies a group of instruments that record
+ // increasing values synchronously with the code path they are measuring.
+ InstrumentKindCounter InstrumentKind = 1
+ // InstrumentKindUpDownCounter identifies a group of instruments that
+ // record increasing and decreasing values synchronously with the code path
+ // they are measuring.
+ InstrumentKindUpDownCounter InstrumentKind = 2
+ // InstrumentKindHistogram identifies a group of instruments that record a
+ // distribution of values synchronously with the code path they are
+ // measuring.
+ InstrumentKindHistogram InstrumentKind = 3
+ // InstrumentKindObservableCounter identifies a group of instruments that
+ // record increasing values in an asynchronous callback.
+ InstrumentKindObservableCounter InstrumentKind = 4
+ // InstrumentKindObservableUpDownCounter identifies a group of instruments
+ // that record increasing and decreasing values in an asynchronous
+ // callback.
+ InstrumentKindObservableUpDownCounter InstrumentKind = 5
+ // InstrumentKindObservableGauge identifies a group of instruments that
+ // record current values in an asynchronous callback.
+ InstrumentKindObservableGauge InstrumentKind = 6
+ // InstrumentKindGauge identifies a group of instruments that record
+ // instantaneous values synchronously with the code path they are
+ // measuring.
+ InstrumentKindGauge InstrumentKind = 7
+)
+
+type nonComparable [0]func() // nolint: unused // This is indeed used.
+
+// Instrument describes properties an instrument is created with.
+type Instrument struct {
+ // Name is the human-readable identifier of the instrument.
+ Name string
+ // Description describes the purpose of the instrument.
+ Description string
+ // Kind defines the functional group of the instrument.
+ Kind InstrumentKind
+ // Unit is the unit of measurement recorded by the instrument.
+ Unit string
+ // Scope identifies the instrumentation that created the instrument.
+ Scope instrumentation.Scope
+
+ // Ensure forward compatibility if non-comparable fields need to be added.
+ nonComparable // nolint: unused
+}
+
+// empty reports whether all fields of i are their zero-value.
+func (i Instrument) empty() bool {
+ return i.Name == "" &&
+ i.Description == "" &&
+ i.Kind == zeroInstrumentKind &&
+ i.Unit == "" &&
+ i.Scope == zeroScope
+}
+
+// matches returns whether all the non-zero-value fields of i match the
+// corresponding fields of other. If i is empty, it matches any other
+// Instrument and true is always returned.
+func (i Instrument) matches(other Instrument) bool {
+ return i.matchesName(other) &&
+ i.matchesDescription(other) &&
+ i.matchesKind(other) &&
+ i.matchesUnit(other) &&
+ i.matchesScope(other)
+}
+
+// matchesName returns true if the Name of i is "" or it equals the Name of
+// other, otherwise false.
+func (i Instrument) matchesName(other Instrument) bool {
+ return i.Name == "" || i.Name == other.Name
+}
+
+// matchesDescription returns true if the Description of i is "" or it equals
+// the Description of other, otherwise false.
+func (i Instrument) matchesDescription(other Instrument) bool {
+ return i.Description == "" || i.Description == other.Description
+}
+
+// matchesKind returns true if the Kind of i is its zero-value or it equals the
+// Kind of other, otherwise false.
+func (i Instrument) matchesKind(other Instrument) bool {
+ return i.Kind == zeroInstrumentKind || i.Kind == other.Kind
+}
+
+// matchesUnit returns true if the Unit of i is its zero-value or it equals the
+// Unit of other, otherwise false.
+func (i Instrument) matchesUnit(other Instrument) bool {
+ return i.Unit == "" || i.Unit == other.Unit
+}
+
+// matchesScope returns true if the Scope of i is its zero-value or it equals
+// the Scope of other, otherwise false.
+func (i Instrument) matchesScope(other Instrument) bool {
+ return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) &&
+ (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) &&
+ (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL)
+}
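+
+// For illustration (hypothetical values, not part of the upstream code):
+//
+//	criteria := Instrument{Name: "latency", Kind: InstrumentKindHistogram}
+//	// true: Unit is zero-value in criteria, so it is not compared.
+//	criteria.matches(Instrument{Name: "latency", Kind: InstrumentKindHistogram, Unit: "ms"})
+//	// false: Kind differs.
+//	criteria.matches(Instrument{Name: "latency", Kind: InstrumentKindCounter})
+//	// true: an empty Instrument matches everything.
+//	Instrument{}.matches(criteria)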
+
+// Stream describes the stream of data an instrument produces.
+type Stream struct {
+ // Name is the human-readable identifier of the stream.
+ Name string
+ // Description describes the purpose of the data.
+ Description string
+ // Unit is the unit of measurement recorded.
+ Unit string
+ // Aggregation the stream uses for an instrument.
+ Aggregation Aggregation
+ // AttributeFilter is an attribute Filter applied to the attributes
+ // recorded for an instrument's measurement. If the filter returns false
+ // the attribute will not be recorded, otherwise, if it returns true, it
+ // will record the attribute.
+ //
+ // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
+ // provide an allow-list of attribute keys here.
+ AttributeFilter attribute.Filter
+}
+
+// instID is the set of identifying properties of an instrument.
+type instID struct {
+ // Name is the name of the stream.
+ Name string
+ // Description is the description of the stream.
+ Description string
+ // Kind defines the functional group of the instrument.
+ Kind InstrumentKind
+ // Unit is the unit of the stream.
+ Unit string
+ // Number is the number type of the stream.
+ Number string
+}
+
+// normalize returns a normalized copy of the instID i.
+//
+// Instrument names are considered case-insensitive. Standardize the instrument
+// name to always be lowercase for the returned instID so it can be compared
+// without the name casing affecting the comparison.
+func (i instID) normalize() instID {
+ i.Name = strings.ToLower(i.Name)
+ return i
+}
+
+type int64Inst struct {
+ measures []aggregate.Measure[int64]
+
+ embedded.Int64Counter
+ embedded.Int64UpDownCounter
+ embedded.Int64Histogram
+ embedded.Int64Gauge
+}
+
+var (
+ _ metric.Int64Counter = (*int64Inst)(nil)
+ _ metric.Int64UpDownCounter = (*int64Inst)(nil)
+ _ metric.Int64Histogram = (*int64Inst)(nil)
+ _ metric.Int64Gauge = (*int64Inst)(nil)
+)
+
+func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
+ c := metric.NewAddConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) {
+ c := metric.NewRecordConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method.
+ for _, in := range i.measures {
+ in(ctx, val, s)
+ }
+}
+
+type float64Inst struct {
+ measures []aggregate.Measure[float64]
+
+ embedded.Float64Counter
+ embedded.Float64UpDownCounter
+ embedded.Float64Histogram
+ embedded.Float64Gauge
+}
+
+var (
+ _ metric.Float64Counter = (*float64Inst)(nil)
+ _ metric.Float64UpDownCounter = (*float64Inst)(nil)
+ _ metric.Float64Histogram = (*float64Inst)(nil)
+ _ metric.Float64Gauge = (*float64Inst)(nil)
+)
+
+func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
+ c := metric.NewAddConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) {
+ c := metric.NewRecordConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
+ for _, in := range i.measures {
+ in(ctx, val, s)
+ }
+}
+
+// observablID is a comparable unique identifier of an observable.
+type observablID[N int64 | float64] struct {
+ name string
+ description string
+ kind InstrumentKind
+ unit string
+ scope instrumentation.Scope
+}
+
+type float64Observable struct {
+ metric.Float64Observable
+ *observable[float64]
+
+ embedded.Float64ObservableCounter
+ embedded.Float64ObservableUpDownCounter
+ embedded.Float64ObservableGauge
+}
+
+var (
+ _ metric.Float64ObservableCounter = float64Observable{}
+ _ metric.Float64ObservableUpDownCounter = float64Observable{}
+ _ metric.Float64ObservableGauge = float64Observable{}
+)
+
+func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable {
+ return float64Observable{
+ observable: newObservable[float64](m, kind, name, desc, u),
+ }
+}
+
+type int64Observable struct {
+ metric.Int64Observable
+ *observable[int64]
+
+ embedded.Int64ObservableCounter
+ embedded.Int64ObservableUpDownCounter
+ embedded.Int64ObservableGauge
+}
+
+var (
+ _ metric.Int64ObservableCounter = int64Observable{}
+ _ metric.Int64ObservableUpDownCounter = int64Observable{}
+ _ metric.Int64ObservableGauge = int64Observable{}
+)
+
+func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable {
+ return int64Observable{
+ observable: newObservable[int64](m, kind, name, desc, u),
+ }
+}
+
+type observable[N int64 | float64] struct {
+ metric.Observable
+ observablID[N]
+
+ meter *meter
+ measures measures[N]
+ dropAggregation bool
+}
+
+func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
+ return &observable[N]{
+ observablID: observablID[N]{
+ name: name,
+ description: desc,
+ kind: kind,
+ unit: u,
+ scope: m.scope,
+ },
+ meter: m,
+ }
+}
+
+// observe records the val for the set of attrs.
+func (o *observable[N]) observe(val N, s attribute.Set) {
+ o.measures.observe(val, s)
+}
+
+func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) {
+ o.measures = append(o.measures, meas...)
+}
+
+type measures[N int64 | float64] []aggregate.Measure[N]
+
+// observe records the val for the set of attrs.
+func (m measures[N]) observe(val N, s attribute.Set) {
+ for _, in := range m {
+ in(context.Background(), val, s)
+ }
+}
+
+var errEmptyAgg = errors.New("no aggregators for observable instrument")
+
+// registerable returns an error if the observable o should not be registered,
+// and nil if it should. An errEmptyAgg error is returned if o is effectively a
+// no-op because it does not have any aggregators. An error is also returned
+// if o is registered with a Meter other than the one it was created by.
+func (o *observable[N]) registerable(m *meter) error {
+ if len(o.measures) == 0 {
+ return errEmptyAgg
+ }
+ if m != o.meter {
+ return fmt.Errorf(
+ "invalid registration: observable %q from Meter %q, registered with Meter %q",
+ o.name,
+ o.scope.Name,
+ m.scope.Name,
+ )
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
new file mode 100644
index 0000000000..25ea6244e5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT.
+
+package metric
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[instrumentKindUndefined-0]
+ _ = x[InstrumentKindCounter-1]
+ _ = x[InstrumentKindUpDownCounter-2]
+ _ = x[InstrumentKindHistogram-3]
+ _ = x[InstrumentKindObservableCounter-4]
+ _ = x[InstrumentKindObservableUpDownCounter-5]
+ _ = x[InstrumentKindObservableGauge-6]
+ _ = x[InstrumentKindGauge-7]
+}
+
+const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge"
+
+var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112}
+
+func (i InstrumentKind) String() string {
+ if i >= InstrumentKind(len(_InstrumentKind_index)-1) {
+ return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
new file mode 100644
index 0000000000..c9976de6c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -0,0 +1,154 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// now is used to return the current local time while allowing tests to
+// override the default time.Now function.
+var now = time.Now
+
+// Measure receives measurements to be aggregated.
+type Measure[N int64 | float64] func(context.Context, N, attribute.Set)
+
+// ComputeAggregation stores the aggregate of measurements into dest and
+// returns the number of aggregate data-points output.
+type ComputeAggregation func(dest *metricdata.Aggregation) int
+
+// Builder builds an aggregate function.
+type Builder[N int64 | float64] struct {
+ // Temporality is the temporality used for the returned aggregate function.
+ //
+ // If this is not provided a default of cumulative will be used (except for
+ // the last-value aggregate function where delta is the only appropriate
+ // temporality).
+ Temporality metricdata.Temporality
+ // Filter is the attribute filter the aggregate function will use on the
+ // input of measurements.
+ Filter attribute.Filter
+ // ReservoirFunc is the factory function used by aggregate functions to
+ // create new exemplar reservoirs for a new seen attribute set.
+ //
+ // If this is not provided a default factory function that returns an
+ // exemplar.Drop reservoir will be used.
+ ReservoirFunc func() exemplar.Reservoir
+ // AggregationLimit is the cardinality limit of measurement attributes. Any
+ // measurement for new attributes once the limit has been reached will be
+ // aggregated into a single aggregate for the "otel.metric.overflow"
+ // attribute.
+ //
+ // If AggregationLimit is less than or equal to zero there will not be an
+ // aggregation limit imposed (i.e. unlimited attribute sets).
+ AggregationLimit int
+}
+
+func (b Builder[N]) resFunc() func() exemplar.Reservoir {
+ if b.ReservoirFunc != nil {
+ return b.ReservoirFunc
+ }
+
+ return exemplar.Drop
+}
+
+type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
+
+func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
+ if b.Filter != nil {
+ fltr := b.Filter // Copy to make it immutable after assignment.
+ return func(ctx context.Context, n N, a attribute.Set) {
+ fAttr, dropped := a.Filter(fltr)
+ f(ctx, n, fAttr, dropped)
+ }
+ }
+ return func(ctx context.Context, n N, a attribute.Set) {
+ f(ctx, n, a, nil)
+ }
+}
+
+// LastValue returns a last-value aggregate function input and output.
+func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
+ lv := newLastValue[N](b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(lv.measure), lv.delta
+ default:
+ return b.filter(lv.measure), lv.cumulative
+ }
+}
+
+// PrecomputedLastValue returns a last-value aggregate function input and
+// output. The aggregation returned from the returned ComputeAggregation
+// function will always only return values from the previous collection cycle.
+func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) {
+ lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(lv.measure), lv.delta
+ default:
+ return b.filter(lv.measure), lv.cumulative
+ }
+}
+
+// PrecomputedSum returns a sum aggregate function input and output. The
+// arguments passed to the input are expected to be the precomputed sum values.
+func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
+ s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(s.measure), s.delta
+ default:
+ return b.filter(s.measure), s.cumulative
+ }
+}
+
+// Sum returns a sum aggregate function input and output.
+func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
+ s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(s.measure), s.delta
+ default:
+ return b.filter(s.measure), s.cumulative
+ }
+}
+
+// ExplicitBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+ h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(h.measure), h.delta
+ default:
+ return b.filter(h.measure), h.cumulative
+ }
+}
+
+// ExponentialBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+ h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(h.measure), h.delta
+ default:
+ return b.filter(h.measure), h.cumulative
+ }
+}
+
+// reset ensures s has capacity and sets its length. If the capacity of s is
+// too small, a new slice is returned with the specified capacity and length.
+func reset[T any](s []T, length, capacity int) []T {
+ if cap(s) < capacity {
+ return make([]T, length, capacity)
+ }
+ return s[:length]
+}
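
The `reset` helper above is the allocation-reuse workhorse for collection: it keeps the existing backing array whenever it is big enough and only allocates otherwise. A self-contained sketch of its semantics:

```go
package main

import "fmt"

// reset mirrors the helper above: reuse the backing array when its capacity
// suffices, otherwise allocate a fresh slice.
func reset[T any](s []T, length, capacity int) []T {
	if cap(s) < capacity {
		return make([]T, length, capacity)
	}
	return s[:length]
}

func main() {
	s := make([]int, 0, 8)
	a := reset(s, 4, 4)  // cap 8 >= 4: reuses s's backing array
	b := reset(s, 4, 16) // cap 8 < 16: allocates anew
	fmt.Println(len(a), cap(a)) // 4 8
	fmt.Println(len(b), cap(b)) // 4 16
}
```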
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
new file mode 100644
index 0000000000..7b7225e6ef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package aggregate provides aggregate types used to compute aggregations and
+// cycle the state of metric measurements made by the SDK. These types and
+// functionality are meant only for internal SDK use.
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
new file mode 100644
index 0000000000..170ae8e58e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "sync"
+
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+var exemplarPool = sync.Pool{
+ New: func() any { return new([]exemplar.Exemplar) },
+}
+
+func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) {
+ dest := exemplarPool.Get().(*[]exemplar.Exemplar)
+ defer func() {
+ *dest = (*dest)[:0]
+ exemplarPool.Put(dest)
+ }()
+
+ *dest = reset(*dest, len(*out), cap(*out))
+
+ f(dest)
+
+ *out = reset(*out, len(*dest), cap(*dest))
+ for i, e := range *dest {
+ (*out)[i].FilteredAttributes = e.FilteredAttributes
+ (*out)[i].Time = e.Time
+ (*out)[i].SpanID = e.SpanID
+ (*out)[i].TraceID = e.TraceID
+
+ switch e.Value.Type() {
+ case exemplar.Int64ValueType:
+ (*out)[i].Value = N(e.Value.Int64())
+ case exemplar.Float64ValueType:
+ (*out)[i].Value = N(e.Value.Float64())
+ }
+ }
+}
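
`collectExemplars` pools pointers to slices so that `Put` does not allocate to box a slice header, and truncates (rather than frees) the buffer before returning it to the pool. A minimal standalone sketch of that pattern, using `int` in place of `exemplar.Exemplar`:

```go
package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	// Pool *[]int, not []int: storing a pointer avoids an allocation
	// when the value is boxed into the pool's interface.
	New: func() any { return new([]int) },
}

func main() {
	buf := bufPool.Get().(*[]int)
	defer func() {
		*buf = (*buf)[:0] // truncate, keeping capacity for the next user
		bufPool.Put(buf)
	}()

	*buf = append(*buf, 1, 2, 3)
	fmt.Println(*buf) // [1 2 3]
}
```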
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
new file mode 100644
index 0000000000..902074b5bf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -0,0 +1,444 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "errors"
+ "math"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+const (
+ expoMaxScale = 20
+ expoMinScale = -10
+
+ smallestNonZeroNormalFloat64 = 0x1p-1022
+
+ // These redefine the math package constants with a type, so the compiler
+ // won't coerce them into an int on 32-bit platforms.
+ maxInt64 int64 = math.MaxInt64
+ minInt64 int64 = math.MinInt64
+)
+
+// expoHistogramDataPoint is a single data point in an exponential histogram.
+type expoHistogramDataPoint[N int64 | float64] struct {
+ attrs attribute.Set
+ res exemplar.Reservoir
+
+ count uint64
+ min N
+ max N
+ sum N
+
+ maxSize int
+ noMinMax bool
+ noSum bool
+
+ scale int
+
+ posBuckets expoBuckets
+ negBuckets expoBuckets
+ zeroCount uint64
+}
+
+func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
+ f := math.MaxFloat64
+ max := N(f) // if N is int64, max will overflow to -9223372036854775808
+ min := N(-f)
+ if N(maxInt64) > N(f) {
+ max = N(maxInt64)
+ min = N(minInt64)
+ }
+ return &expoHistogramDataPoint[N]{
+ attrs: attrs,
+ min: max,
+ max: min,
+ maxSize: maxSize,
+ noMinMax: noMinMax,
+ noSum: noSum,
+ scale: maxScale,
+ }
+}
+
+// record adds a new measurement to the histogram. It will rescale the buckets if needed.
+func (p *expoHistogramDataPoint[N]) record(v N) {
+ p.count++
+
+ if !p.noMinMax {
+ if v < p.min {
+ p.min = v
+ }
+ if v > p.max {
+ p.max = v
+ }
+ }
+ if !p.noSum {
+ p.sum += v
+ }
+
+ absV := math.Abs(float64(v))
+
+ if absV == 0.0 {
+ p.zeroCount++
+ return
+ }
+
+ bin := p.getBin(absV)
+
+ bucket := &p.posBuckets
+ if v < 0 {
+ bucket = &p.negBuckets
+ }
+
+ // If the new bin would make the counts larger than maxSize, we need to
+ // downscale current measurements.
+ if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
+ if p.scale-scaleDelta < expoMinScale {
+ // With a scale of -10 there are only two buckets for the whole range of float64 values.
+ // This can only happen if there is a max size of 1.
+ otel.Handle(errors.New("exponential histogram scale underflow"))
+ return
+ }
+ // Downscale
+ p.scale -= scaleDelta
+ p.posBuckets.downscale(scaleDelta)
+ p.negBuckets.downscale(scaleDelta)
+
+ bin = p.getBin(absV)
+ }
+
+ bucket.record(bin)
+}
+
+// getBin returns the bin v should be recorded into.
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
+ frac, exp := math.Frexp(v)
+ if p.scale <= 0 {
+ // Because Frexp returns a fraction in [0.5, 1), exp is always 1 power of two higher than we want.
+ correction := 1
+ if frac == .5 {
+ // If v is an exact power of two the frac will be .5 and the exp
+ // will be one higher than we want.
+ correction = 2
+ }
+ return (exp - correction) >> (-p.scale)
+ }
+ return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1
+}
+
+// scaleFactors are constants used in calculating the logarithm index. They are
+// equivalent to 2^index/log(2).
+var scaleFactors = [21]float64{
+ math.Ldexp(math.Log2E, 0),
+ math.Ldexp(math.Log2E, 1),
+ math.Ldexp(math.Log2E, 2),
+ math.Ldexp(math.Log2E, 3),
+ math.Ldexp(math.Log2E, 4),
+ math.Ldexp(math.Log2E, 5),
+ math.Ldexp(math.Log2E, 6),
+ math.Ldexp(math.Log2E, 7),
+ math.Ldexp(math.Log2E, 8),
+ math.Ldexp(math.Log2E, 9),
+ math.Ldexp(math.Log2E, 10),
+ math.Ldexp(math.Log2E, 11),
+ math.Ldexp(math.Log2E, 12),
+ math.Ldexp(math.Log2E, 13),
+ math.Ldexp(math.Log2E, 14),
+ math.Ldexp(math.Log2E, 15),
+ math.Ldexp(math.Log2E, 16),
+ math.Ldexp(math.Log2E, 17),
+ math.Ldexp(math.Log2E, 18),
+ math.Ldexp(math.Log2E, 19),
+ math.Ldexp(math.Log2E, 20),
+}
+
+// scaleChange returns the magnitude of the scale change needed to fit bin in
+// the bucket. If no scale change is needed 0 is returned.
+func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
+ if length == 0 {
+ // No need to rescale if there are no buckets.
+ return 0
+ }
+
+ low := startBin
+ high := bin
+ if startBin >= bin {
+ low = bin
+ high = startBin + length - 1
+ }
+
+ count := 0
+ for high-low >= p.maxSize {
+ low = low >> 1
+ high = high >> 1
+ count++
+ if count > expoMaxScale-expoMinScale {
+ return count
+ }
+ }
+ return count
+}
+
+// expoBuckets is a set of buckets in an exponential histogram.
+type expoBuckets struct {
+ startBin int
+ counts []uint64
+}
+
+// record increments the count for the given bin, and expands the buckets if needed.
+// Size changes must be done before calling this function.
+func (b *expoBuckets) record(bin int) {
+ if len(b.counts) == 0 {
+ b.counts = []uint64{1}
+ b.startBin = bin
+ return
+ }
+
+ endBin := b.startBin + len(b.counts) - 1
+
+ // if the new bin is inside the current range
+ if bin >= b.startBin && bin <= endBin {
+ b.counts[bin-b.startBin]++
+ return
+ }
+ // if the new bin is before the current start add spaces to the counts
+ if bin < b.startBin {
+ origLen := len(b.counts)
+ newLength := endBin - bin + 1
+ shift := b.startBin - bin
+
+ if newLength > cap(b.counts) {
+ b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
+ }
+
+ copy(b.counts[shift:origLen+shift], b.counts[:])
+ b.counts = b.counts[:newLength]
+ for i := 1; i < shift; i++ {
+ b.counts[i] = 0
+ }
+ b.startBin = bin
+ b.counts[0] = 1
+ return
+ }
+ // if the new bin is after the end, add spaces to the end
+ if bin > endBin {
+ if bin-b.startBin < cap(b.counts) {
+ b.counts = b.counts[:bin-b.startBin+1]
+ for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
+ b.counts[i] = 0
+ }
+ b.counts[bin-b.startBin] = 1
+ return
+ }
+
+ end := make([]uint64, bin-b.startBin-len(b.counts)+1)
+ b.counts = append(b.counts, end...)
+ b.counts[bin-b.startBin] = 1
+ }
+}
+
+// downscale shrinks the buckets by a factor of 2^delta. It will sum counts
+// into the correct lower resolution bucket.
+func (b *expoBuckets) downscale(delta int) {
+ // Example
+ // delta = 2
+ // Original offset: -6
+ // Counts: [ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ // bins: -6 -5, -4, -3, -2, -1, 0, 1, 2, 3, 4
+ // new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1
+ // new Offset: -2
+ // new Counts: [4, 14, 30, 10]
+
+ if len(b.counts) <= 1 || delta < 1 {
+ b.startBin = b.startBin >> delta
+ return
+ }
+
+ steps := 1 << delta
+ offset := b.startBin % steps
+ offset = (offset + steps) % steps // to make offset positive
+ for i := 1; i < len(b.counts); i++ {
+ idx := i + offset
+ if idx%steps == 0 {
+ b.counts[idx/steps] = b.counts[i]
+ continue
+ }
+ b.counts[idx/steps] += b.counts[i]
+ }
+
+ lastIdx := (len(b.counts) - 1 + offset) / steps
+ b.counts = b.counts[:lastIdx+1]
+ b.startBin = b.startBin >> delta
+}
+
+// newExponentialHistogram returns an Aggregator that summarizes a set of
+// measurements as an exponential histogram. Each histogram is scoped by attributes
+// and the aggregation cycle the measurements were made in.
+func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir) *expoHistogram[N] {
+ return &expoHistogram[N]{
+ noSum: noSum,
+ noMinMax: noMinMax,
+ maxSize: int(maxSize),
+ maxScale: int(maxScale),
+
+ newRes: r,
+ limit: newLimiter[*expoHistogramDataPoint[N]](limit),
+ values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]),
+
+ start: now(),
+ }
+}
+
+// expoHistogram summarizes a set of measurements as a histogram with exponentially
+// defined buckets.
+type expoHistogram[N int64 | float64] struct {
+ noSum bool
+ noMinMax bool
+ maxSize int
+ maxScale int
+
+ newRes func() exemplar.Reservoir
+ limit limiter[*expoHistogramDataPoint[N]]
+ values map[attribute.Distinct]*expoHistogramDataPoint[N]
+ valuesMu sync.Mutex
+
+ start time.Time
+}
+
+func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ // Ignore NaN and infinity.
+ if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
+ return
+ }
+
+ t := now()
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ attr := e.limit.Attributes(fltrAttr, e.values)
+ v, ok := e.values[attr.Equivalent()]
+ if !ok {
+ v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
+ v.res = e.newRes()
+
+ e.values[attr.Equivalent()] = v
+ }
+ v.record(value)
+ v.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr)
+}
+
+func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
+ // In that case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+ h.Temporality = metricdata.DeltaTemporality
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ n := len(e.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for _, val := range e.values {
+ hDPts[i].Attributes = val.attrs
+ hDPts[i].StartTime = e.start
+ hDPts[i].Time = t
+ hDPts[i].Count = val.count
+ hDPts[i].Scale = int32(val.scale)
+ hDPts[i].ZeroCount = val.zeroCount
+ hDPts[i].ZeroThreshold = 0.0
+
+ hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
+ hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
+ copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
+
+ hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
+ hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
+ copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
+
+ if !e.noSum {
+ hDPts[i].Sum = val.sum
+ }
+ if !e.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(val.min)
+ hDPts[i].Max = metricdata.NewExtrema(val.max)
+ }
+
+ collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+ i++
+ }
+ // Unused attribute sets do not report.
+ clear(e.values)
+
+ e.start = t
+ h.DataPoints = hDPts
+ *dest = h
+ return n
+}
+
+func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
+ // In that case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+ h.Temporality = metricdata.CumulativeTemporality
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ n := len(e.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for _, val := range e.values {
+ hDPts[i].Attributes = val.attrs
+ hDPts[i].StartTime = e.start
+ hDPts[i].Time = t
+ hDPts[i].Count = val.count
+ hDPts[i].Scale = int32(val.scale)
+ hDPts[i].ZeroCount = val.zeroCount
+ hDPts[i].ZeroThreshold = 0.0
+
+ hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
+ hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
+ copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
+
+ hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
+ hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
+ copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
+
+ if !e.noSum {
+ hDPts[i].Sum = val.sum
+ }
+ if !e.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(val.min)
+ hDPts[i].Max = metricdata.NewExtrema(val.max)
+ }
+
+ collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+ i++
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ }
+
+ h.DataPoints = hDPts
+ *dest = h
+ return n
+}
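
The worked example in the `downscale` comment can be checked independently: lowering the scale by delta collapses every 2^delta adjacent bins into one, using floor division (an arithmetic shift) on the bin index. A standalone sketch, separate from the vendored type:

```go
package main

import "fmt"

// regroup merges bucket counts after lowering the scale by delta.
func regroup(startBin int, counts []uint64, delta int) (int, []uint64) {
	lo := startBin >> delta // arithmetic shift: floor division by 2^delta
	hi := lo
	grouped := map[int]uint64{}
	for i, c := range counts {
		bin := (startBin + i) >> delta
		grouped[bin] += c
		if bin > hi {
			hi = bin
		}
	}
	out := make([]uint64, hi-lo+1)
	for bin, c := range grouped {
		out[bin-lo] = c
	}
	return lo, out
}

func main() {
	// The example from the downscale comment: delta=2, original offset -6.
	start, counts := regroup(-6, []uint64{3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 2)
	fmt.Println(start, counts) // -2 [4 14 30 10]
}
```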
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
new file mode 100644
index 0000000000..213baf50f5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -0,0 +1,235 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "slices"
+ "sort"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type buckets[N int64 | float64] struct {
+ attrs attribute.Set
+ res exemplar.Reservoir
+
+ counts []uint64
+ count uint64
+ total N
+ min, max N
+}
+
+// newBuckets returns buckets with n bins.
+func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] {
+ return &buckets[N]{attrs: attrs, counts: make([]uint64, n)}
+}
+
+func (b *buckets[N]) sum(value N) { b.total += value }
+
+func (b *buckets[N]) bin(idx int, value N) {
+ b.counts[idx]++
+ b.count++
+ if value < b.min {
+ b.min = value
+ } else if value > b.max {
+ b.max = value
+ }
+}
+
+// histValues summarizes a set of measurements as a histogram with
+// explicitly defined buckets.
+type histValues[N int64 | float64] struct {
+ noSum bool
+ bounds []float64
+
+ newRes func() exemplar.Reservoir
+ limit limiter[*buckets[N]]
+ values map[attribute.Distinct]*buckets[N]
+ valuesMu sync.Mutex
+}
+
+func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir) *histValues[N] {
+ // Keeping all buckets correctly associated with the passed boundaries is
+ // ultimately this type's responsibility. Make a copy
+ // here so we can always guarantee this. Or, in the case of failure, have
+ // complete control over the fix.
+ b := slices.Clone(bounds)
+ slices.Sort(b)
+ return &histValues[N]{
+ noSum: noSum,
+ bounds: b,
+ newRes: r,
+ limit: newLimiter[*buckets[N]](limit),
+ values: make(map[attribute.Distinct]*buckets[N]),
+ }
+}
+
+// Aggregate records the measurement value, scoped by attr, and aggregates it
+// into a histogram.
+func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ // This search will return an index in the range [0, len(s.bounds)], where
+ // it will return len(s.bounds) if value is greater than the last element
+ // of s.bounds. This aligns with the buckets in that the length of buckets
+ // is len(s.bounds)+1, with the last bucket representing:
+ // (s.bounds[len(s.bounds)-1], +∞).
+ idx := sort.SearchFloat64s(s.bounds, float64(value))
+
+ t := now()
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ b, ok := s.values[attr.Equivalent()]
+ if !ok {
+ // N+1 buckets. For example:
+ //
+ // bounds = [0, 5, 10]
+ //
+ // Then,
+ //
+ // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
+ b = newBuckets[N](attr, len(s.bounds)+1)
+ b.res = s.newRes()
+
+ // Ensure min and max are recorded values (not zero), for new buckets.
+ b.min, b.max = value, value
+ s.values[attr.Equivalent()] = b
+ }
+ b.bin(idx, value)
+ if !s.noSum {
+ b.sum(value)
+ }
+ b.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr)
+}
+
+// newHistogram returns an Aggregator that summarizes a set of measurements as
+// a histogram.
+func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir) *histogram[N] {
+ return &histogram[N]{
+ histValues: newHistValues[N](boundaries, noSum, limit, r),
+ noMinMax: noMinMax,
+ start: now(),
+ }
+}
+
+// histogram summarizes a set of measurements as a histogram with explicitly
+// defined buckets.
+type histogram[N int64 | float64] struct {
+ *histValues[N]
+
+ noMinMax bool
+ start time.Time
+}
+
+func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+ // case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.Histogram[N])
+ h.Temporality = metricdata.DeltaTemporality
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ // Do not allow modification of our copy of bounds.
+ bounds := slices.Clone(s.bounds)
+
+ n := len(s.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for _, val := range s.values {
+ hDPts[i].Attributes = val.attrs
+ hDPts[i].StartTime = s.start
+ hDPts[i].Time = t
+ hDPts[i].Count = val.count
+ hDPts[i].Bounds = bounds
+ hDPts[i].BucketCounts = val.counts
+
+ if !s.noSum {
+ hDPts[i].Sum = val.total
+ }
+
+ if !s.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(val.min)
+ hDPts[i].Max = metricdata.NewExtrema(val.max)
+ }
+
+ collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+ i++
+ }
+ // Unused attribute sets do not report.
+ clear(s.values)
+ // The delta collection cycle resets.
+ s.start = t
+
+ h.DataPoints = hDPts
+ *dest = h
+
+ return n
+}
+
+func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+ // case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.Histogram[N])
+ h.Temporality = metricdata.CumulativeTemporality
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ // Do not allow modification of our copy of bounds.
+ bounds := slices.Clone(s.bounds)
+
+ n := len(s.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for _, val := range s.values {
+ hDPts[i].Attributes = val.attrs
+ hDPts[i].StartTime = s.start
+ hDPts[i].Time = t
+ hDPts[i].Count = val.count
+ hDPts[i].Bounds = bounds
+
+ // The HistogramDataPoint field values returned need to be copies of
+ // the buckets value as we will keep updating them.
+ //
+ // TODO (#3047): Making copies for bounds and counts incurs a large
+ // memory allocation footprint. Alternatives should be explored.
+ hDPts[i].BucketCounts = slices.Clone(val.counts)
+
+ if !s.noSum {
+ hDPts[i].Sum = val.total
+ }
+
+ if !s.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(val.min)
+ hDPts[i].Max = metricdata.NewExtrema(val.max)
+ }
+
+ collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+ i++
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ }
+
+ h.DataPoints = hDPts
+ *dest = h
+
+ return n
+}
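
The bucket index arithmetic in `histValues.measure` leans on `sort.SearchFloat64s` returning the smallest index whose bound is greater than or equal to the value, which yields upper-inclusive buckets. A short check of that mapping for the `[0, 5, 10]` bounds used in the comment above:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// bounds [0, 5, 10] define buckets
	// (-inf, 0], (0, 5], (5, 10], (10, +inf) at indexes 0..3.
	bounds := []float64{0, 5, 10}
	for _, v := range []float64{-1, 0, 3, 5, 7, 10, 42} {
		fmt.Printf("%v -> bucket %d\n", v, sort.SearchFloat64s(bounds, v))
	}
	// -1 -> 0, 0 -> 0, 3 -> 1, 5 -> 1, 7 -> 2, 10 -> 2, 42 -> 3
}
```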
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
new file mode 100644
index 0000000000..8f406dd2bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
@@ -0,0 +1,162 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// datapoint is timestamped measurement data.
+type datapoint[N int64 | float64] struct {
+ attrs attribute.Set
+ timestamp time.Time
+ value N
+ res exemplar.Reservoir
+}
+
+func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *lastValue[N] {
+ return &lastValue[N]{
+ newRes: r,
+ limit: newLimiter[datapoint[N]](limit),
+ values: make(map[attribute.Distinct]datapoint[N]),
+ start: now(),
+ }
+}
+
+// lastValue summarizes a set of measurements as the last one made.
+type lastValue[N int64 | float64] struct {
+ sync.Mutex
+
+ newRes func() exemplar.Reservoir
+ limit limiter[datapoint[N]]
+ values map[attribute.Distinct]datapoint[N]
+ start time.Time
+}
+
+func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ t := now()
+
+ s.Lock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ d, ok := s.values[attr.Equivalent()]
+ if !ok {
+ d.res = s.newRes()
+ }
+
+ d.attrs = attr
+ d.timestamp = t
+ d.value = value
+ d.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr)
+
+ s.values[attr.Equivalent()] = d
+}
+
+func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int {
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
+ // the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := s.copyDpts(&gData.DataPoints)
+ // Do not report stale values.
+ clear(s.values)
+ // Update start time for delta temporality.
+ s.start = now()
+
+ *dest = gData
+
+ return n
+}
+
+func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int {
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
+ // the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := s.copyDpts(&gData.DataPoints)
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ *dest = gData
+
+ return n
+}
+
+// copyDpts copies the datapoints held by s into dest. The number of datapoints
+// copied is returned.
+func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N]) int {
+ n := len(s.values)
+ *dest = reset(*dest, n, n)
+
+ var i int
+ for _, v := range s.values {
+ (*dest)[i].Attributes = v.attrs
+ (*dest)[i].StartTime = s.start
+ (*dest)[i].Time = v.timestamp
+ (*dest)[i].Value = v.value
+ collectExemplars(&(*dest)[i].Exemplars, v.res.Collect)
+ i++
+ }
+ return n
+}
+
+// newPrecomputedLastValue returns an aggregator that summarizes a set of
+// observations as the last one made.
+func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *precomputedLastValue[N] {
+ return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
+}
+
+// precomputedLastValue summarizes a set of observations as the last one made.
+type precomputedLastValue[N int64 | float64] struct {
+ *lastValue[N]
+}
+
+func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int {
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
+ // the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := s.copyDpts(&gData.DataPoints)
+ // Do not report stale values.
+ clear(s.values)
+ // Update start time for delta temporality.
+ s.start = now()
+
+ *dest = gData
+
+ return n
+}
+
+func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int {
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
+ // the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := s.copyDpts(&gData.DataPoints)
+ // Do not report stale values.
+ clear(s.values)
+ *dest = gData
+
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
new file mode 100644
index 0000000000..9ea0251edd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// overflowSet is the attribute set used to record a measurement when adding
+// another distinct attribute set to the aggregate would exceed the aggregate
+// limit.
+var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
+
+// limiter limits aggregate values.
+type limiter[V any] struct {
+ // aggLimit is the maximum number of metric streams that can be aggregated.
+ //
+ // Any metric stream with attributes distinct from any set already
+ // aggregated once the aggLimit has been met will instead be aggregated
+ // into an "overflow" metric stream. That stream will only contain the
+ // "otel.metric.overflow"=true attribute.
+ aggLimit int
+}
+
+// newLimiter returns a new Limiter with the provided aggregation limit.
+func newLimiter[V any](aggregation int) limiter[V] {
+ return limiter[V]{aggLimit: aggregation}
+}
+
+// Attributes checks if adding a measurement for attrs will exceed the
+// aggregation cardinality limit for the existing measurements. If it will,
+// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
+// limit is not set (limit <= 0), attrs is returned.
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set {
+ if l.aggLimit > 0 {
+ _, exists := measurements[attrs.Equivalent()]
+ if !exists && len(measurements) >= l.aggLimit-1 {
+ return overflowSet
+ }
+ }
+
+ return attrs
+}
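
The limiter's rule is easiest to see end to end: with an aggregation limit of n, at most n-1 distinct user attribute sets are stored, and everything else lands on the overflow set. A standalone sketch reproducing that rule with the public `attribute` package:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	overflow := attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
	const aggLimit = 3
	seen := map[attribute.Distinct]int{}

	for _, name := range []string{"a", "b", "c", "a"} {
		attrs := attribute.NewSet(attribute.String("name", name))
		// Same check as limiter.Attributes: a new set while the map is
		// nearly full diverts the measurement to the overflow set.
		if _, ok := seen[attrs.Equivalent()]; !ok && len(seen) >= aggLimit-1 {
			attrs = overflow
		}
		seen[attrs.Equivalent()]++
	}
	fmt.Println(len(seen)) // 3: {name=a}, {name=b}, and the overflow set
}
```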
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
new file mode 100644
index 0000000000..babe76aba9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -0,0 +1,240 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type sumValue[N int64 | float64] struct {
+ n N
+ res exemplar.Reservoir
+ attrs attribute.Set
+}
+
+// valueMap is the storage for sums.
+type valueMap[N int64 | float64] struct {
+ sync.Mutex
+ newRes func() exemplar.Reservoir
+ limit limiter[sumValue[N]]
+ values map[attribute.Distinct]sumValue[N]
+}
+
+func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir) *valueMap[N] {
+ return &valueMap[N]{
+ newRes: r,
+ limit: newLimiter[sumValue[N]](limit),
+ values: make(map[attribute.Distinct]sumValue[N]),
+ }
+}
+
+func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ t := now()
+
+ s.Lock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ v, ok := s.values[attr.Equivalent()]
+ if !ok {
+ v.res = s.newRes()
+ }
+
+ v.attrs = attr
+ v.n += value
+ v.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr)
+
+ s.values[attr.Equivalent()] = v
+}
+
+// newSum returns an aggregator that summarizes a set of measurements as their
+// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
+// the measurements were made in.
+func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir) *sum[N] {
+ return &sum[N]{
+ valueMap: newValueMap[N](limit, r),
+ monotonic: monotonic,
+ start: now(),
+ }
+}
+
+// sum summarizes a set of measurements made as their arithmetic sum.
+type sum[N int64 | float64] struct {
+ *valueMap[N]
+
+ monotonic bool
+ start time.Time
+}
+
+func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.DeltaTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for _, val := range s.values {
+ dPts[i].Attributes = val.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = val.n
+ collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+ i++
+ }
+ // Do not report stale values.
+ clear(s.values)
+ // The delta collection cycle resets.
+ s.start = t
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.CumulativeTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for _, value := range s.values {
+ dPts[i].Attributes = value.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = value.n
+ collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ i++
+ }
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+// newPrecomputedSum returns an aggregator that summarizes a set of
+// observations as their arithmetic sum. Each sum is scoped by attributes and
+// the aggregation cycle the measurements were made in.
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir) *precomputedSum[N] {
+ return &precomputedSum[N]{
+ valueMap: newValueMap[N](limit, r),
+ monotonic: monotonic,
+ start: now(),
+ }
+}
+
+// precomputedSum summarizes a set of observations as their arithmetic sum.
+type precomputedSum[N int64 | float64] struct {
+ *valueMap[N]
+
+ monotonic bool
+ start time.Time
+
+ reported map[attribute.Distinct]N
+}
+
+func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
+ t := now()
+ newReported := make(map[attribute.Distinct]N)
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.DeltaTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for key, value := range s.values {
+ delta := value.n - s.reported[key]
+
+ dPts[i].Attributes = value.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = delta
+ collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+
+ newReported[key] = value.n
+ i++
+ }
+ // Unused attribute sets do not report.
+ clear(s.values)
+ s.reported = newReported
+ // The delta collection cycle resets.
+ s.start = t
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.CumulativeTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for _, val := range s.values {
+ dPts[i].Attributes = val.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = val.n
+ collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+
+ i++
+ }
+ // Unused attribute sets do not report.
+ clear(s.values)
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
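
The bookkeeping in `precomputedSum.delta` reduces to subtracting the previously reported cumulative value from the current one. In isolation:

```go
package main

import "fmt"

func main() {
	// Observed cumulative values 10, 15, 15 report deltas 10, 5, 0.
	var reported int64
	for _, cumulative := range []int64{10, 15, 15} {
		delta := cumulative - reported
		reported = cumulative
		fmt.Println(delta)
	}
}
```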
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go
new file mode 100644
index 0000000000..5394f48e0d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go
@@ -0,0 +1,6 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package exemplar provides an implementation of the OpenTelemetry exemplar
+// reservoir to be used in metric collection pipelines.
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
new file mode 100644
index 0000000000..bf21e45dfa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Drop returns a [Reservoir] that drops all measurements it is offered.
+func Drop() Reservoir { return &dropRes{} }
+
+type dropRes struct{}
+
+// Offer does nothing, all measurements offered will be dropped.
+func (r *dropRes) Offer(context.Context, time.Time, Value, []attribute.KeyValue) {}
+
+// Collect resets dest. No exemplars will ever be returned.
+func (r *dropRes) Collect(dest *[]Exemplar) {
+ *dest = (*dest)[:0]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go
new file mode 100644
index 0000000000..fcaa6a4697
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Exemplar is a measurement sampled from a timeseries providing a typical
+// example.
+type Exemplar struct {
+ // FilteredAttributes are the attributes recorded with the measurement but
+ // filtered out of the timeseries' aggregated data.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was recorded.
+ Time time.Time
+ // Value is the measured value.
+ Value Value
+ // SpanID is the ID of the span that was active during the measurement. If
+ // no span was active or the span was not sampled this will be empty.
+ SpanID []byte `json:",omitempty"`
+ // TraceID is the ID of the trace the active span belonged to during the
+ // measurement. If no span was active or the span was not sampled this will
+ // be empty.
+ TraceID []byte `json:",omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
new file mode 100644
index 0000000000..d96aacc281
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SampledFilter returns a [Reservoir] wrapping r that will only offer measurements
+// to r if the passed context associated with the measurement contains a sampled
+// [go.opentelemetry.io/otel/trace.SpanContext].
+func SampledFilter(r Reservoir) Reservoir {
+ return filtered{Reservoir: r}
+}
+
+type filtered struct {
+ Reservoir
+}
+
+func (f filtered) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
+ if trace.SpanContextFromContext(ctx).IsSampled() {
+ f.Reservoir.Offer(ctx, t, n, a)
+ }
+}
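
`SampledFilter` gates on `trace.SpanContextFromContext(ctx).IsSampled()`, so only measurements made under a sampled span reach the wrapped reservoir. A sketch of the two cases using the public `trace` API; the IDs below are arbitrary placeholders:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	sampled := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    trace.TraceID{0x01},
		SpanID:     trace.SpanID{0x01},
		TraceFlags: trace.FlagsSampled,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sampled)

	fmt.Println(trace.SpanContextFromContext(ctx).IsSampled()) // true: offered
	fmt.Println(trace.SpanContextFromContext(context.Background()).IsSampled()) // false: dropped
}
```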
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
new file mode 100644
index 0000000000..a6ff86d027
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
@@ -0,0 +1,46 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "slices"
+ "sort"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Histogram returns a [Reservoir] that samples the last measurement that falls
+// within a histogram bucket. The histogram bucket upper-boundaries are defined
+// by bounds.
+//
+// The passed bounds will be sorted by this function.
+func Histogram(bounds []float64) Reservoir {
+ slices.Sort(bounds)
+ return &histRes{
+ bounds: bounds,
+ storage: newStorage(len(bounds) + 1),
+ }
+}
+
+type histRes struct {
+ *storage
+
+ // bounds are bucket bounds in ascending order.
+ bounds []float64
+}
+
+func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
+ var x float64
+ switch v.Type() {
+ case Int64ValueType:
+ x = float64(v.Int64())
+ case Float64ValueType:
+ x = v.Float64()
+ default:
+ panic("unknown value type")
+ }
+ r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
new file mode 100644
index 0000000000..6753e11664
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
@@ -0,0 +1,183 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// rng is used to make sampling decisions.
+//
+// Do not use crypto/rand. There is no reason for the decrease in performance
+// given this is not a security sensitive decision.
+var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// random returns, as a float64, a uniform pseudo-random number in the open
+// interval (0.0,1.0).
+func random() float64 {
+ // TODO: This does not return a uniform number. rng.Float64 returns a
+ // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
+ // returns multiples of 2^-53, and not all floating point numbers between 0
+ // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand
+ // are always going to be 0).
+ //
+ // An alternative algorithm should be considered that will actually return
+ // a uniform number in the interval (0,1). For example, since the default
+ // rand source provides a uniform distribution for Int63, this can be
+ // converted following the prototypical code of Mersenne Twister 64 (Takuji
+ // Nishimura and Makoto Matsumoto:
+ // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
+ //
+ // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
+ //
+ // There are likely many other methods to explore here as well.
+
+ f := rng.Float64()
+ for f == 0 {
+ f = rng.Float64()
+ }
+ return f
+}
+
+// FixedSize returns a [Reservoir] that samples at most k exemplars. If there
+// are k or fewer measurements made, the Reservoir will sample each one. If
+// there are more than k, the Reservoir will then randomly sample all
+// additional measurements with a decreasing probability.
+func FixedSize(k int) Reservoir {
+ r := &randRes{storage: newStorage(k)}
+ r.reset()
+ return r
+}
+
+type randRes struct {
+ *storage
+
+ // count is the number of measurements seen.
+ count int64
+ // next is the next count that will store a measurement at a random index
+ // once the reservoir has been filled.
+ next int64
+ // w is the largest random number in a distribution that is used to compute
+ // the next value of next.
+ w float64
+}
+
+func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
+ // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
+ // 1994). "Reservoir-Sampling Algorithms of Time Complexity
+ // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
+ // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
+ //
+ // A high-level overview of "Algorithm L":
+ // 0) Pre-calculate the random count greater than the storage size when
+ // an exemplar will be replaced.
+ // 1) Accept all measurements offered until the configured storage size is
+ // reached.
+ // 2) Loop:
+ // a) When the pre-calculated count is reached, replace a random
+ // existing exemplar with the offered measurement.
+ // b) Calculate the next random count greater than the existing one
+ // which will replace another exemplar.
+ //
+ // The way a "replacement" count is computed is by looking at `n` number of
+ // independent random numbers each corresponding to an offered measurement.
+ // Of these numbers the smallest `k` (the same size as the storage
+ // capacity) of them are kept as a subset. The maximum value in this
+ // subset, called `w` is used to weight another random number generation
+ // for the next count that will be considered.
+ //
+ // By weighting the next count computation like described, it is able to
+ // perform a uniformly-weighted sampling algorithm based on the number of
+ // samples the reservoir has seen so far. The sampling will "slow down" as
+ // more and more samples are offered so as to reduce a bias towards those
+ // offered just prior to the end of the collection.
+ //
+ // This algorithm is preferred because of its balance of simplicity and
+ // performance. It will compute three random numbers (the bulk of
+ // computation time) for each item that becomes part of the reservoir, but
+ // it does not spend any time on items that do not. In particular it has an
+ // asymptotic runtime of O(k(1 + log(n/k))) where n is the number of
+ // measurements offered and k is the reservoir size.
+ //
+ // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
+ // this and other reservoir sampling algorithms. See
+ // https://github.com/MrAlias/reservoir-sampling for a performance
+ // comparison of reservoir sampling algorithms.
+
+ if int(r.count) < cap(r.store) {
+ r.store[r.count] = newMeasurement(ctx, t, n, a)
+ } else {
+ if r.count == r.next {
+ // Overwrite a random existing measurement with the one offered.
+ idx := int(rng.Int63n(int64(cap(r.store))))
+ r.store[idx] = newMeasurement(ctx, t, n, a)
+ r.advance()
+ }
+ }
+ r.count++
+}
+
+// reset resets r to the initial state.
+func (r *randRes) reset() {
+ // This resets the number of exemplars known.
+ r.count = 0
+ // Random index inserts should only happen after the storage is full.
+ r.next = int64(cap(r.store))
+
+ // Initial random number in the series used to generate r.next.
+ //
+ // This is set before r.advance to reset or initialize the random number
+ // series. Without doing so it would always be 0 or never restart a new
+ // random number series.
+ //
+ // This maps the uniform random number in (0,1) to a geometric distribution
+ // over the same interval. The mean of the distribution is inversely
+ // proportional to the storage capacity.
+ r.w = math.Exp(math.Log(random()) / float64(cap(r.store)))
+
+ r.advance()
+}
+
+// advance updates the count at which the offered measurement will overwrite an
+// existing exemplar.
+func (r *randRes) advance() {
+ // Calculate the next value in the random number series.
+ //
+ // The current value of r.w is based on the max of a distribution of random
+ // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity
+ // of the storage and each `u` in the interval (0,w)). To calculate the
+ // next r.w we use the fact that when the next exemplar is selected to be
+ // included in the storage an existing one will be dropped, and the
+ // corresponding random number in the set used to calculate r.w will also
+ // be replaced. The replacement random number will also be within (0,w),
+ // therefore the next r.w will be based on the same distribution (i.e.
+ // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
+ // computing the next random number `u` and take r.w as `w * u^(1/k)`.
+ r.w *= math.Exp(math.Log(random()) / float64(cap(r.store)))
+ // Use the new random number in the series to calculate the count of the
+ // next measurement that will be stored.
+ //
+ // Given 0 < r.w < 1, each iteration will result in subsequent r.w being
+ // smaller. This translates here into the next value of next being selected
+ // against a distribution with a higher mean (i.e. the expected value will
+ // increase and replacements become less likely).
+ //
+ // Important to note, the new r.next will always be at least 1 more than
+ // the last r.next.
+ r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1
+}
+
+func (r *randRes) Collect(dest *[]Exemplar) {
+ r.storage.Collect(dest)
+ // Call reset here even though it will reset r.count and restart the random
+ // number series. This will persist any old exemplars as long as no new
+ // measurements are offered, but it will also prioritize those new
+ // measurements that are made over the older collection cycle ones.
+ r.reset()
+}
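
For reference, the skip-ahead structure of "Algorithm L" can be written as a self-contained function: fill the reservoir, then jump directly to the next replacement index using a geometrically distributed gap instead of testing every item. A compact sketch, not the vendored implementation:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// u returns a uniform number in (0,1), redrawing the measure-zero 0 case.
func u() float64 {
	f := rand.Float64()
	for f == 0 {
		f = rand.Float64()
	}
	return f
}

// sampleL picks k items uniformly at random from stream via Algorithm L.
func sampleL(stream []int, k int) []int {
	res := make([]int, 0, k)
	w := math.Exp(math.Log(u()) / float64(k))
	next := k + int(math.Log(u())/math.Log(1-w)) + 1
	for i, v := range stream {
		if i < k {
			res = append(res, v) // accept everything until full
			continue
		}
		if i == next { // replace a random slot, then skip ahead
			res[rand.Intn(k)] = v
			w *= math.Exp(math.Log(u()) / float64(k))
			next += int(math.Log(u())/math.Log(1-w)) + 1
		}
	}
	return res
}

func main() {
	stream := make([]int, 1000)
	for i := range stream {
		stream[i] = i
	}
	fmt.Println(sampleL(stream, 4)) // four uniformly sampled values
}
```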
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go
new file mode 100644
index 0000000000..80fa59554f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go
@@ -0,0 +1,32 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Reservoir holds the sampled exemplars of measurements made.
+type Reservoir interface {
+ // Offer accepts the parameters associated with a measurement. The
+ // parameters will be stored as an exemplar if the Reservoir decides to
+ // sample the measurement.
+ //
+ // The passed ctx needs to contain any baggage or span that were active
+ // when the measurement was made. This information may be used by the
+ // Reservoir in making a sampling decision.
+ //
+ // The time t is the time when the measurement was made. The val and attr
+ // parameters are the value and dropped (filtered) attributes of the
+ // measurement respectively.
+ Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue)
+
+ // Collect returns all the held exemplars.
+ //
+ // The Reservoir state is preserved after this call.
+ Collect(dest *[]Exemplar)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go
new file mode 100644
index 0000000000..10b2976f79
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// storage is an exemplar storage for [Reservoir] implementations.
+type storage struct {
+ // store are the measurements sampled.
+ //
+ // This does not use []metricdata.Exemplar because it potentially would
+ // require an allocation for trace and span IDs in the hot path of Offer.
+ store []measurement
+}
+
+func newStorage(n int) *storage {
+ return &storage{store: make([]measurement, n)}
+}
+
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *storage) Collect(dest *[]Exemplar) {
+ *dest = reset(*dest, len(r.store), len(r.store))
+ var n int
+ for _, m := range r.store {
+ if !m.valid {
+ continue
+ }
+
+ m.Exemplar(&(*dest)[n])
+ n++
+ }
+ *dest = (*dest)[:n]
+}
+
+// measurement is a measurement made by a telemetry system.
+type measurement struct {
+ // FilteredAttributes are the attributes dropped during the measurement.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was made.
+ Time time.Time
+ // Value is the value of the measurement.
+ Value Value
+ // SpanContext is the SpanContext active when a measurement was made.
+ SpanContext trace.SpanContext
+
+ valid bool
+}
+
+// newMeasurement returns a new non-empty Measurement.
+func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement {
+ return measurement{
+ FilteredAttributes: droppedAttr,
+ Time: ts,
+ Value: v,
+ SpanContext: trace.SpanContextFromContext(ctx),
+ valid: true,
+ }
+}
+
+// Exemplar returns m as an [Exemplar].
+func (m measurement) Exemplar(dest *Exemplar) {
+ dest.FilteredAttributes = m.FilteredAttributes
+ dest.Time = m.Time
+ dest.Value = m.Value
+
+ if m.SpanContext.HasTraceID() {
+ traceID := m.SpanContext.TraceID()
+ dest.TraceID = traceID[:]
+ } else {
+ dest.TraceID = dest.TraceID[:0]
+ }
+
+ if m.SpanContext.HasSpanID() {
+ spanID := m.SpanContext.SpanID()
+ dest.SpanID = spanID[:]
+ } else {
+ dest.SpanID = dest.SpanID[:0]
+ }
+}
+
+func reset[T any](s []T, length, capacity int) []T {
+ if cap(s) < capacity {
+ return make([]T, length, capacity)
+ }
+ return s[:length]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
new file mode 100644
index 0000000000..9daf27dc00
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
@@ -0,0 +1,57 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import "math"
+
+// ValueType identifies the type of value used in exemplar data.
+type ValueType uint8
+
+const (
+ // UnknownValueType should not be used. It represents a misconfigured
+ // Value.
+ UnknownValueType ValueType = 0
+ // Int64ValueType represents a Value with int64 data.
+ Int64ValueType ValueType = 1
+ // Float64ValueType represents a Value with float64 data.
+ Float64ValueType ValueType = 2
+)
+
+// Value is the value of data held by an exemplar.
+type Value struct {
+ t ValueType
+ val uint64
+}
+
+// NewValue returns a new [Value] for the provided value.
+func NewValue[N int64 | float64](value N) Value {
+ switch v := any(value).(type) {
+ case int64:
+ return Value{t: Int64ValueType, val: uint64(v)}
+ case float64:
+ return Value{t: Float64ValueType, val: math.Float64bits(v)}
+ }
+ return Value{}
+}
+
+// Type returns the [ValueType] of data held by v.
+func (v Value) Type() ValueType { return v.t }
+
+// Int64 returns the value of v as an int64. If the ValueType of v is not an
+// Int64ValueType, 0 is returned.
+func (v Value) Int64() int64 {
+ if v.t == Int64ValueType {
+ return int64(v.val)
+ }
+ return 0
+}
+
+// Float64 returns the value of v as a float64. If the ValueType of v is not
+// a Float64ValueType, 0 is returned.
+func (v Value) Float64() float64 {
+ if v.t == Float64ValueType {
+ return math.Float64frombits(v.val)
+ }
+ return 0
+}
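
`Value` packs either numeric type into one `uint64`: float64 via its IEEE-754 bit pattern and int64 via a plain two's-complement conversion, both of which round-trip losslessly:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	f := 3.75
	bits := math.Float64bits(f)                  // store
	fmt.Println(math.Float64frombits(bits) == f) // true

	i := int64(-42)
	packed := uint64(i)             // store
	fmt.Println(int64(packed) == i) // true
}
```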
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
new file mode 100644
index 0000000000..19ec6806ff
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+// ReuseSlice returns slice resliced to length n if its capacity is greater
+// than or equal to n; the reused elements are not zeroed. Otherwise, it
+// returns a new []T with length and capacity equal to n.
+func ReuseSlice[T any](slice []T, n int) []T {
+ if cap(slice) >= n {
+ return slice[:n]
+ }
+ return make([]T, n)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
new file mode 100644
index 0000000000..aba69d6547
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
@@ -0,0 +1,112 @@
+# Experimental Features
+
+The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backward-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Cardinality Limit](#cardinality-limit)
+- [Exemplars](#exemplars)
+
+### Cardinality Limit
+
+The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable.
+The value must be an integer.
+All other values are ignored.
+
+If the value set is less than or equal to `0`, no limit will be applied.
+
+#### Examples
+
+Set the cardinality limit to 2000.
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=2000
+```
+
+Set an infinite cardinality limit (functionally equivalent to disabling the feature).
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=-1
+```
+
+Disable the cardinality limit.
+
+```console
+unset OTEL_GO_X_CARDINALITY_LIMIT
+```
+
+### Exemplars
+
+A sample of measurements made may be exported directly as a set of exemplars.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+Exemplar filters are also supported.
+The exemplar filter applies to all measurements made.
+It filters these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
+
+To change the exemplar filter from the default `"trace_based"` filter, set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
+The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+
+- `"always_on"`: allows all measurements
+- `"always_off"`: denies all measurements
+- `"trace_based"`: allows only sampled measurements
+
+All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
+
+[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
+
+#### Examples
+
+Enable exemplars to be exported.
+
+```console
+export OTEL_GO_X_EXEMPLAR=true
+```
+
+Disable exemplars from being exported.
+
+```console
+unset OTEL_GO_X_EXEMPLAR
+```
+
+Set the exemplar filter to allow all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_on
+```
+
+Set the exemplar filter to deny all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_off
+```
+
+Set the exemplar filter to only allow sampled measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
+```
+
+Revert to the default exemplar filter (`"trace_based"`).
+
+```console
+unset OTEL_METRICS_EXEMPLAR_FILTER
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
new file mode 100644
index 0000000000..8cd2f37417
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel metric SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
+
+import (
+ "os"
+ "strconv"
+ "strings"
+)
+
+var (
+ // Exemplars is an experimental feature flag that defines if exemplars
+ // should be recorded for metric data-points.
+ //
+ // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable
+ // to the case-insensitive string value of "true" (e.g. "True" and "TRUE"
+ // will also enable this).
+ Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) {
+ if strings.ToLower(v) == "true" {
+ return v, true
+ }
+ return "", false
+ })
+
+ // CardinalityLimit is an experimental feature flag that defines if
+ // cardinality limits should be applied to the recorded metric data-points.
+ //
+ // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
+ // variable to the integer limit value you want to use.
+ //
+ // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
+ // will disable the cardinality limits.
+ CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
+ n, err := strconv.Atoi(v)
+ if err != nil {
+ return 0, false
+ }
+ return n, true
+ })
+)
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
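+//
+// A usage sketch (hypothetical caller; CardinalityLimit is defined above):
+//
+//	if limit, ok := CardinalityLimit.Lookup(); ok {
+//		// limit holds the parsed OTEL_GO_X_CARDINALITY_LIMIT value.
+//		_ = limit
+//	}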
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
new file mode 100644
index 0000000000..e0fd86ca78
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
@@ -0,0 +1,203 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ManualReader is a simple Reader that allows an application to
+// read metrics on demand.
+type ManualReader struct {
+ sdkProducer atomic.Value
+ shutdownOnce sync.Once
+
+ mu sync.Mutex
+ isShutdown bool
+ externalProducers atomic.Value
+
+ temporalitySelector TemporalitySelector
+ aggregationSelector AggregationSelector
+}
+
+// Compile-time check that ManualReader implements Reader and is comparable.
+var _ = map[Reader]struct{}{&ManualReader{}: {}}
+
+// NewManualReader returns a Reader which is directly called to collect metrics.
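+//
+// A minimal usage sketch (MeterProvider and WithReader are defined elsewhere
+// in this package):
+//
+//	reader := NewManualReader()
+//	provider := NewMeterProvider(WithReader(reader))
+//	// ... create meters and record measurements via provider ...
+//	var rm metricdata.ResourceMetrics
+//	err := reader.Collect(context.Background(), &rm)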
+func NewManualReader(opts ...ManualReaderOption) *ManualReader {
+ cfg := newManualReaderConfig(opts)
+ r := &ManualReader{
+ temporalitySelector: cfg.temporalitySelector,
+ aggregationSelector: cfg.aggregationSelector,
+ }
+ r.externalProducers.Store(cfg.producers)
+ return r
+}
+
+// register stores the sdkProducer which enables the caller
+// to read metrics from the SDK on demand.
+func (mr *ManualReader) register(p sdkProducer) {
+ // Only register once. If producer is already set, do nothing.
+ if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+ msg := "did not register manual reader"
+ global.Error(errDuplicateRegister, msg)
+ }
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality {
+ return mr.temporalitySelector(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
+ return mr.aggregationSelector(kind)
+}
+
+// Shutdown closes any connections and frees any resources used by the reader.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Shutdown(context.Context) error {
+ err := ErrReaderShutdown
+ mr.shutdownOnce.Do(func() {
+ // Any future call to Collect will now return ErrReaderShutdown.
+ mr.sdkProducer.Store(produceHolder{
+ produce: shutdownProducer{}.produce,
+ })
+ mr.mu.Lock()
+ defer mr.mu.Unlock()
+ mr.isShutdown = true
+ // release references to Producer(s)
+ mr.externalProducers.Store([]Producer{})
+ err = nil
+ })
+ return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if rm == nil {
+ return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
+ }
+ p := mr.sdkProducer.Load()
+ if p == nil {
+ return ErrReaderNotRegistered
+ }
+
+ ph, ok := p.(produceHolder)
+ if !ok {
+ // The atomic.Value is entirely in the ManualReader's control, so
+ // this should never happen. In the unforeseen case that it does
+ // happen, return an error instead of panicking so a user's code does
+ // not halt in the process.
+ err := fmt.Errorf("manual reader: invalid producer: %T", p)
+ return err
+ }
+
+ err := ph.produce(ctx, rm)
+ if err != nil {
+ return err
+ }
+ var errs []error
+ for _, producer := range mr.externalProducers.Load().([]Producer) {
+ externalMetrics, err := producer.Produce(ctx)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+ }
+
+ global.Debug("ManualReader collection", "Data", rm)
+
+ return unifyErrors(errs)
+}
+
+// MarshalLog returns logging data about the ManualReader.
+func (r *ManualReader) MarshalLog() interface{} {
+ r.mu.Lock()
+ down := r.isShutdown
+ r.mu.Unlock()
+ return struct {
+ Type string
+ Registered bool
+ Shutdown bool
+ }{
+ Type: "ManualReader",
+ Registered: r.sdkProducer.Load() != nil,
+ Shutdown: down,
+ }
+}
+
+// manualReaderConfig contains configuration options for a ManualReader.
+type manualReaderConfig struct {
+ temporalitySelector TemporalitySelector
+ aggregationSelector AggregationSelector
+ producers []Producer
+}
+
+// newManualReaderConfig returns a manualReaderConfig configured with options.
+func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
+ cfg := manualReaderConfig{
+ temporalitySelector: DefaultTemporalitySelector,
+ aggregationSelector: DefaultAggregationSelector,
+ }
+ for _, opt := range opts {
+ cfg = opt.applyManual(cfg)
+ }
+ return cfg
+}
+
+// ManualReaderOption applies a configuration option value to a ManualReader.
+type ManualReaderOption interface {
+ applyManual(manualReaderConfig) manualReaderConfig
+}
+
+// WithTemporalitySelector sets the TemporalitySelector a reader will use to
+// determine the Temporality of an instrument based on its kind. If this
+// option is not used, the reader will use the DefaultTemporalitySelector.
+func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption {
+ return temporalitySelectorOption{selector: selector}
+}
+
+type temporalitySelectorOption struct {
+ selector func(instrument InstrumentKind) metricdata.Temporality
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig {
+ mrc.temporalitySelector = t.selector
+ return mrc
+}
+
+// WithAggregationSelector sets the AggregationSelector a reader will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// or the aggregation explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector AggregationSelector) ManualReaderOption {
+ return aggregationSelectorOption{selector: selector}
+}
+
+type aggregationSelectorOption struct {
+ selector AggregationSelector
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig {
+ c.aggregationSelector = t.selector
+ return c
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
new file mode 100644
index 0000000000..479b7610eb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
@@ -0,0 +1,709 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
+// ErrInstrumentName indicates the created instrument has an invalid name.
+// Valid names must start with a letter, consist of 255 or fewer characters,
+// and contain only alphanumeric characters, '_', '.', '-', and '/'.
+var ErrInstrumentName = errors.New("invalid instrument name")
+
+// meter handles the creation and coordination of all metric instruments. A
+// meter represents a single instrumentation scope; all metric telemetry
+// produced by an instrumentation scope will use metric instruments from a
+// single meter.
+type meter struct {
+ embedded.Meter
+
+ scope instrumentation.Scope
+ pipes pipelines
+
+ int64Insts *cacheWithErr[instID, *int64Inst]
+ float64Insts *cacheWithErr[instID, *float64Inst]
+ int64ObservableInsts *cacheWithErr[instID, int64Observable]
+ float64ObservableInsts *cacheWithErr[instID, float64Observable]
+
+ int64Resolver resolver[int64]
+ float64Resolver resolver[float64]
+}
+
+func newMeter(s instrumentation.Scope, p pipelines) *meter {
+ // viewCache ensures that instrument conflicts this meter is asked to
+ // create, including number conflicts, are logged to the user.
+ var viewCache cache[string, instID]
+
+ var int64Insts cacheWithErr[instID, *int64Inst]
+ var float64Insts cacheWithErr[instID, *float64Inst]
+ var int64ObservableInsts cacheWithErr[instID, int64Observable]
+ var float64ObservableInsts cacheWithErr[instID, float64Observable]
+
+ return &meter{
+ scope: s,
+ pipes: p,
+ int64Insts: &int64Insts,
+ float64Insts: &float64Insts,
+ int64ObservableInsts: &int64ObservableInsts,
+ float64ObservableInsts: &float64ObservableInsts,
+ int64Resolver: newResolver[int64](p, &viewCache),
+ float64Resolver: newResolver[float64](p, &viewCache),
+ }
+}
+
+// Compile-time check that meter implements metric.Meter.
+var _ metric.Meter = (*meter)(nil)
+
+// Int64Counter returns a new instrument identified by name and configured with
+// options. The instrument is used to synchronously record increasing int64
+// measurements during a computational operation.
+func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ cfg := metric.NewInt64CounterConfig(options...)
+ const kind = InstrumentKindCounter
+ p := int64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Int64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// int64 measurements during a computational operation.
+func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ cfg := metric.NewInt64UpDownCounterConfig(options...)
+ const kind = InstrumentKindUpDownCounter
+ p := int64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Int64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of int64 measurements during a computational operation.
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ cfg := metric.NewInt64HistogramConfig(options...)
+ p := int64InstProvider{m}
+ i, err := p.lookupHistogram(name, cfg)
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Int64Gauge returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record
+// instantaneous int64 measurements during a computational operation.
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ cfg := metric.NewInt64GaugeConfig(options...)
+ const kind = InstrumentKindGauge
+ p := int64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// int64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
+ key := instID{
+ Name: id.Name,
+ Description: id.Description,
+ Unit: id.Unit,
+ Kind: id.Kind,
+ }
+ if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+ warnRepeatedObservableCallbacks(id)
+ }
+ return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
+ inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+ for _, insert := range m.int64Resolver.inserters {
+ // Connect the measure functions for instruments in this pipeline with the
+ // callbacks for this pipeline.
+ in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ return inst, err
+ }
+ // Drop aggregation
+ if len(in) == 0 {
+ inst.dropAggregation = true
+ continue
+ }
+ inst.appendMeasures(in)
+ for _, cback := range callbacks {
+ inst := int64Observer{measures: in}
+ fn := cback
+ insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+ }
+ }
+ return inst, validateInstrumentName(id.Name)
+ })
+}
+
+// Int64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+ cfg := metric.NewInt64ObservableCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableCounter,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableUpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// int64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+ cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableUpDownCounter,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ cfg := metric.NewInt64ObservableGaugeConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableGauge,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64Counter returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record increasing
+// float64 measurements during a computational operation.
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ cfg := metric.NewFloat64CounterConfig(options...)
+ const kind = InstrumentKindCounter
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// float64 measurements during a computational operation.
+func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ cfg := metric.NewFloat64UpDownCounterConfig(options...)
+ const kind = InstrumentKindUpDownCounter
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of float64 measurements during a computational operation.
+func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ cfg := metric.NewFloat64HistogramConfig(options...)
+ p := float64InstProvider{m}
+ i, err := p.lookupHistogram(name, cfg)
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64Gauge returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record
+// instantaneous float64 measurements during a computational operation.
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ cfg := metric.NewFloat64GaugeConfig(options...)
+ const kind = InstrumentKindGauge
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// float64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
+ key := instID{
+ Name: id.Name,
+ Description: id.Description,
+ Unit: id.Unit,
+ Kind: id.Kind,
+ }
+ if m.float64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+ warnRepeatedObservableCallbacks(id)
+ }
+ return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
+ inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+ for _, insert := range m.float64Resolver.inserters {
+ // Connect the measure functions for instruments in this pipeline with the
+ // callbacks for this pipeline.
+ in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ return inst, err
+ }
+ // Drop aggregation
+ if len(in) == 0 {
+ inst.dropAggregation = true
+ continue
+ }
+ inst.appendMeasures(in)
+ for _, cback := range callbacks {
+ inst := float64Observer{measures: in}
+ fn := cback
+ insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+ }
+ }
+ return inst, validateInstrumentName(id.Name)
+ })
+}
+
+// Float64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing float64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+ cfg := metric.NewFloat64ObservableCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableCounter,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableUpDownCounter returns a new instrument identified by name
+// and configured with options. The instrument is used to asynchronously record
+// float64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+ cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableUpDownCounter,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous float64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+ cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableGauge,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+func validateInstrumentName(name string) error {
+ if len(name) == 0 {
+ return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name)
+ }
+ if len(name) > 255 {
+ return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name)
+ }
+ if !isAlpha([]rune(name)[0]) {
+ return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name)
+ }
+ if len(name) == 1 {
+ return nil
+ }
+ for _, c := range name[1:] {
+ if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' {
+ return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
+ }
+ }
+ return nil
+}
+
+func isAlpha(c rune) bool {
+ return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+
+func isAlphanumeric(c rune) bool {
+ return isAlpha(c) || ('0' <= c && c <= '9')
+}
+
+func warnRepeatedObservableCallbacks(id Instrument) {
+ inst := fmt.Sprintf(
+ "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+ id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+ )
+ global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
+ "instrument", inst,
+ )
+}
+
+// RegisterCallback registers f to be called each collection cycle so it will
+// make observations for insts during those cycles.
+//
+// The only instruments f can make observations for are insts. All other
+// observations will be dropped and an error will be logged.
+//
+// Only instruments from this meter can be registered with f; an error is
+// returned if other instruments are provided.
+//
+// Only observations made in the callback will be exported. Unlike synchronous
+// instruments, asynchronous callbacks can "forget" attribute sets that are no
+// longer relevant by omitting the observation during the callback.
+//
+// The returned Registration can be used to unregister f.
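+//
+// A minimal sketch (assuming an observable instrument inst created by this
+// meter):
+//
+//	reg, err := meter.RegisterCallback(
+//		func(ctx context.Context, o metric.Observer) error {
+//			o.ObserveInt64(inst, 1)
+//			return nil
+//		},
+//		inst,
+//	)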
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+ if len(insts) == 0 {
+ // Don't allocate an observer if not needed.
+ return noopRegister{}, nil
+ }
+
+ reg := newObserver()
+ var errs multierror
+ for _, inst := range insts {
+ // Unwrap any global.
+ if u, ok := inst.(interface {
+ Unwrap() metric.Observable
+ }); ok {
+ inst = u.Unwrap()
+ }
+
+ switch o := inst.(type) {
+ case int64Observable:
+ if err := o.registerable(m); err != nil {
+ if !errors.Is(err, errEmptyAgg) {
+ errs.append(err)
+ }
+ continue
+ }
+ reg.registerInt64(o.observablID)
+ case float64Observable:
+ if err := o.registerable(m); err != nil {
+ if !errors.Is(err, errEmptyAgg) {
+ errs.append(err)
+ }
+ continue
+ }
+ reg.registerFloat64(o.observablID)
+ default:
+ // Instrument external to the SDK.
+ return nil, fmt.Errorf("invalid observable: from different implementation")
+ }
+ }
+
+ err := errs.errorOrNil()
+ if reg.len() == 0 {
+ // All insts use drop aggregation or are invalid.
+ return noopRegister{}, err
+ }
+
+ // Some or all instruments were valid.
+ cback := func(ctx context.Context) error { return f(ctx, reg) }
+ return m.pipes.registerMultiCallback(cback), err
+}
+
+type observer struct {
+ embedded.Observer
+
+ float64 map[observablID[float64]]struct{}
+ int64 map[observablID[int64]]struct{}
+}
+
+func newObserver() observer {
+ return observer{
+ float64: make(map[observablID[float64]]struct{}),
+ int64: make(map[observablID[int64]]struct{}),
+ }
+}
+
+func (r observer) len() int {
+ return len(r.float64) + len(r.int64)
+}
+
+func (r observer) registerFloat64(id observablID[float64]) {
+ r.float64[id] = struct{}{}
+}
+
+func (r observer) registerInt64(id observablID[int64]) {
+ r.int64[id] = struct{}{}
+}
+
+var (
+ errUnknownObserver = errors.New("unknown observable instrument")
+ errUnregObserver = errors.New("observable instrument not registered for callback")
+)
+
+func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) {
+ var oImpl float64Observable
+ switch conv := o.(type) {
+ case float64Observable:
+ oImpl = conv
+ case interface {
+ Unwrap() metric.Observable
+ }:
+ // Unwrap any global.
+ async := conv.Unwrap()
+ var ok bool
+ if oImpl, ok = async.(float64Observable); !ok {
+ global.Error(errUnknownObserver, "failed to record asynchronous")
+ return
+ }
+ default:
+ global.Error(errUnknownObserver, "failed to record")
+ return
+ }
+
+ if _, registered := r.float64[oImpl.observablID]; !registered {
+ if !oImpl.dropAggregation {
+ global.Error(errUnregObserver, "failed to record",
+ "name", oImpl.name,
+ "description", oImpl.description,
+ "unit", oImpl.unit,
+ "number", fmt.Sprintf("%T", float64(0)),
+ )
+ }
+ return
+ }
+ c := metric.NewObserveConfig(opts)
+ oImpl.observe(v, c.Attributes())
+}
+
+func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) {
+ var oImpl int64Observable
+ switch conv := o.(type) {
+ case int64Observable:
+ oImpl = conv
+ case interface {
+ Unwrap() metric.Observable
+ }:
+ // Unwrap any global.
+ async := conv.Unwrap()
+ var ok bool
+ if oImpl, ok = async.(int64Observable); !ok {
+ global.Error(errUnknownObserver, "failed to record asynchronous")
+ return
+ }
+ default:
+ global.Error(errUnknownObserver, "failed to record")
+ return
+ }
+
+ if _, registered := r.int64[oImpl.observablID]; !registered {
+ if !oImpl.dropAggregation {
+ global.Error(errUnregObserver, "failed to record",
+ "name", oImpl.name,
+ "description", oImpl.description,
+ "unit", oImpl.unit,
+ "number", fmt.Sprintf("%T", int64(0)),
+ )
+ }
+ return
+ }
+ c := metric.NewObserveConfig(opts)
+ oImpl.observe(v, c.Attributes())
+}
+
+type noopRegister struct{ embedded.Registration }
+
+func (noopRegister) Unregister() error {
+ return nil
+}
+
+// int64InstProvider provides int64 OpenTelemetry instruments.
+type int64InstProvider struct{ *meter }
+
+func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) {
+ inst := Instrument{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ Scope: p.scope,
+ }
+ return p.int64Resolver.Aggregators(inst)
+}
+
+func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) {
+ boundaries := cfg.ExplicitBucketBoundaries()
+ aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
+ if aggError != nil {
+ // If boundaries are invalid, ignore them.
+ boundaries = nil
+ }
+ inst := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ Scope: p.scope,
+ }
+ measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries)
+ return measures, errors.Join(aggError, err)
+}
+
+// lookup returns the resolved *int64Inst.
+func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
+ return p.meter.int64Insts.Lookup(instID{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ }, func() (*int64Inst, error) {
+ aggs, err := p.aggs(kind, name, desc, u)
+ return &int64Inst{measures: aggs}, err
+ })
+}
+
+// lookupHistogram returns the resolved *int64Inst for a histogram.
+func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
+ return p.meter.int64Insts.Lookup(instID{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ }, func() (*int64Inst, error) {
+ aggs, err := p.histogramAggs(name, cfg)
+ return &int64Inst{measures: aggs}, err
+ })
+}
+
+// float64InstProvider provides float64 OpenTelemetry instruments.
+type float64InstProvider struct{ *meter }
+
+func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) {
+ inst := Instrument{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ Scope: p.scope,
+ }
+ return p.float64Resolver.Aggregators(inst)
+}
+
+func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) {
+ boundaries := cfg.ExplicitBucketBoundaries()
+ aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
+ if aggError != nil {
+ // If boundaries are invalid, ignore them.
+ boundaries = nil
+ }
+ inst := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ Scope: p.scope,
+ }
+ measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries)
+ return measures, errors.Join(aggError, err)
+}
+
+// lookup returns the resolved *float64Inst.
+func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
+ return p.meter.float64Insts.Lookup(instID{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ }, func() (*float64Inst, error) {
+ aggs, err := p.aggs(kind, name, desc, u)
+ return &float64Inst{measures: aggs}, err
+ })
+}
+
+// lookupHistogram returns the resolved *float64Inst for a histogram.
+func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
+ return p.meter.float64Insts.Lookup(instID{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ }, func() (*float64Inst, error) {
+ aggs, err := p.histogramAggs(name, cfg)
+ return &float64Inst{measures: aggs}, err
+ })
+}
+
+type int64Observer struct {
+ embedded.Int64Observer
+ measures[int64]
+}
+
+func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
+ c := metric.NewObserveConfig(opts)
+ o.observe(val, c.Attributes())
+}
+
+type float64Observer struct {
+ embedded.Float64Observer
+ measures[float64]
+}
+
+func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
+ c := metric.NewObserveConfig(opts)
+ o.observe(val, c.Attributes())
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
new file mode 100644
index 0000000000..d1390df1b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
@@ -0,0 +1,3 @@
+# SDK Metric data
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/metricdata)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/metricdata)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
new file mode 100644
index 0000000000..d32cfc67d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
@@ -0,0 +1,296 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
+// that created them.
+type ResourceMetrics struct {
+ // Resource represents the entity that collected the metrics.
+ Resource *resource.Resource
+ // ScopeMetrics are the collection of metrics with unique Scopes.
+ ScopeMetrics []ScopeMetrics
+}
+
+// ScopeMetrics is a collection of Metrics produced by a Meter.
+type ScopeMetrics struct {
+ // Scope is the Scope that the Meter was created with.
+ Scope instrumentation.Scope
+ // Metrics are a list of aggregations created by the Meter.
+ Metrics []Metrics
+}
+
+// Metrics is a collection of one or more aggregated timeseries from an Instrument.
+type Metrics struct {
+ // Name is the name of the Instrument that created this data.
+ Name string
+ // Description is the description of the Instrument, which can be used in documentation.
+ Description string
+ // Unit is the unit in which the Instrument reports.
+ Unit string
+ // Data is the aggregated data from an Instrument.
+ Data Aggregation
+}
+
+// Aggregation is the store of data reported by an Instrument.
+// It will be one of: Gauge, Sum, Histogram, ExponentialHistogram, or Summary.
+type Aggregation interface {
+ privateAggregation()
+}
+
+// Gauge represents a measurement of the current value of an instrument.
+type Gauge[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // Attributes.
+ DataPoints []DataPoint[N]
+}
+
+func (Gauge[N]) privateAggregation() {}
+
+// Sum represents the sum of all measurements of values from an instrument.
+type Sum[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // Attributes.
+ DataPoints []DataPoint[N]
+ // Temporality describes if the aggregation is reported as the change from the
+ // last report time, or the cumulative changes since a fixed start time.
+ Temporality Temporality
+ // IsMonotonic represents if this aggregation only increases or decreases.
+ IsMonotonic bool
+}
+
+func (Sum[N]) privateAggregation() {}
+
+// DataPoint is a single data point in a timeseries.
+type DataPoint[N int64 | float64] struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+ // StartTime is when the timeseries was started. (optional)
+ StartTime time.Time `json:",omitempty"`
+ // Time is the time when the timeseries was recorded. (optional)
+ Time time.Time `json:",omitempty"`
+ // Value is the value of this data point.
+ Value N
+
+ // Exemplars is the sampled Exemplars collected during the timeseries.
+ Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// Histogram represents the histogram of all measurements of values from an instrument.
+type Histogram[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // Attributes.
+ DataPoints []HistogramDataPoint[N]
+ // Temporality describes if the aggregation is reported as the change from the
+ // last report time, or the cumulative changes since a fixed start time.
+ Temporality Temporality
+}
+
+func (Histogram[N]) privateAggregation() {}
+
+// HistogramDataPoint is a single histogram data point in a timeseries.
+type HistogramDataPoint[N int64 | float64] struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+ // StartTime is when the timeseries was started.
+ StartTime time.Time
+ // Time is the time when the timeseries was recorded.
+ Time time.Time
+
+ // Count is the number of updates this histogram has been calculated with.
+ Count uint64
+ // Bounds are the upper bounds of the buckets of the histogram. The last
+ // boundary, +infinity, is implied and omitted from Bounds.
+ Bounds []float64
+ // BucketCounts is the count of each of the buckets.
+ BucketCounts []uint64
+
+ // Min is the minimum value recorded. (optional)
+ Min Extrema[N]
+ // Max is the maximum value recorded. (optional)
+ Max Extrema[N]
+ // Sum is the sum of the values recorded.
+ Sum N
+
+ // Exemplars is the sampled Exemplars collected during the timeseries.
+ Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialHistogram represents the histogram of all measurements of values from an instrument.
+type ExponentialHistogram[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // attributes.
+ DataPoints []ExponentialHistogramDataPoint[N]
+ // Temporality describes if the aggregation is reported as the change from the
+ // last report time, or the cumulative changes since a fixed start time.
+ Temporality Temporality
+}
+
+func (ExponentialHistogram[N]) privateAggregation() {}
+
+// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries.
+type ExponentialHistogramDataPoint[N int64 | float64] struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+ // StartTime is when the timeseries was started.
+ StartTime time.Time
+ // Time is the time when the timeseries was recorded.
+ Time time.Time
+
+ // Count is the number of updates this histogram has been calculated with.
+ Count uint64
+ // Min is the minimum value recorded. (optional)
+ Min Extrema[N]
+ // Max is the maximum value recorded. (optional)
+ Max Extrema[N]
+ // Sum is the sum of the values recorded.
+ Sum N
+
+ // Scale describes the resolution of the histogram. Boundaries are
+ // located at powers of the base, where:
+ //
+ // base = 2 ^ (2 ^ -Scale)
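+ //
+ // For example, Scale 0 gives base 2, Scale 1 gives base 2^(1/2) ≈ 1.41,
+ // and Scale -1 gives base 4.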
+ Scale int32
+ // ZeroCount is the number of values whose absolute value
+ // is less than or equal to [ZeroThreshold].
+ // When ZeroThreshold is 0, this is the number of values that
+ // cannot be expressed using the standard exponential formula
+ // as well as values that have been rounded to zero.
+ // ZeroCount represents the special zero count bucket.
+ ZeroCount uint64
+
+ // PositiveBucket is the range of positive value bucket counts.
+ PositiveBucket ExponentialBucket
+ // NegativeBucket is the range of negative value bucket counts.
+ NegativeBucket ExponentialBucket
+
+ // ZeroThreshold is the width of the zero region. Where the zero region is
+ // defined as the closed interval [-ZeroThreshold, ZeroThreshold].
+ ZeroThreshold float64
+
+ // Exemplars is the sampled Exemplars collected during the timeseries.
+ Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialBucket is a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialBucket struct {
+ // Offset is the bucket index of the first entry in the Counts slice.
+ Offset int32
+ // Counts is a slice where Counts[i] carries the count of the bucket at
+ // index (Offset+i). Counts[i] is the count of values greater than
+ // base^(Offset+i) and less than or equal to base^(Offset+i+1).
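+ // For example, with Offset = -1, Counts[0] is the count of values in
+ // (base^-1, base^0] and Counts[1] the count of values in (base^0, base^1].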
+ Counts []uint64
+}
+
+// Extrema is the minimum or maximum value of a dataset.
+type Extrema[N int64 | float64] struct {
+ value N
+ valid bool
+}
+
+// MarshalText converts the Extrema value to text.
+func (e Extrema[N]) MarshalText() ([]byte, error) {
+ if !e.valid {
+ return json.Marshal(nil)
+ }
+ return json.Marshal(e.value)
+}
+
+// MarshalJSON converts the Extrema value to a JSON number.
+func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
+ return e.MarshalText()
+}
+
+// NewExtrema returns an Extrema set to v.
+func NewExtrema[N int64 | float64](v N) Extrema[N] {
+ return Extrema[N]{value: v, valid: true}
+}
+
+// Value returns the Extrema value and true if the Extrema is defined.
+// Otherwise, if the Extrema is its zero-value, defined will be false.
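+//
+// For example:
+//
+//	m := NewExtrema(int64(3))
+//	v, defined := m.Value() // v == 3, defined == true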
+func (e Extrema[N]) Value() (v N, defined bool) {
+ return e.value, e.valid
+}
+
+// Exemplar is a measurement sampled from a timeseries providing a typical
+// example.
+type Exemplar[N int64 | float64] struct {
+ // FilteredAttributes are the attributes recorded with the measurement but
+ // filtered out of the timeseries' aggregated data.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was recorded.
+ Time time.Time
+ // Value is the measured value.
+ Value N
+ // SpanID is the ID of the span that was active during the measurement. If
+ // no span was active or the span was not sampled this will be empty.
+ SpanID []byte `json:",omitempty"`
+ // TraceID is the ID of the trace the active span belonged to during the
+ // measurement. If no span was active or the span was not sampled this will
+ // be empty.
+ TraceID []byte `json:",omitempty"`
+}
+
+// Summary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// data type.
+//
+// These data points cannot always be merged in a meaningful way. The Summary
+// type is only used by bridges from other metrics libraries, and cannot be
+// produced using OpenTelemetry instrumentation.
+type Summary struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // attributes.
+ DataPoints []SummaryDataPoint
+}
+
+func (Summary) privateAggregation() {}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+
+ // StartTime is when the timeseries was started.
+ StartTime time.Time
+ // Time is the time when the timeseries was recorded.
+ Time time.Time
+
+ // Count is the number of updates this summary has been calculated with.
+ Count uint64
+
+ // Sum is the sum of the values recorded.
+ Sum float64
+
+ // (Optional) list of values at different quantiles of the distribution calculated
+ // from the current snapshot. The quantiles must be strictly increasing.
+ QuantileValues []QuantileValue
+}
+
+// QuantileValue is the value at a given quantile of a summary.
+type QuantileValue struct {
+ // Quantile is the quantile of this value.
+ //
+ // Must be in the interval [0.0, 1.0].
+ Quantile float64
+
+ // Value is the value at the given quantile of a summary.
+ //
+ // Quantile values must NOT be negative.
+ Value float64
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
new file mode 100644
index 0000000000..187713dadf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=Temporality
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+// Temporality defines the window that an aggregation was calculated over.
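+//
+// For example, a counter aggregated with CumulativeTemporality reports the
+// total since its start time, while one aggregated with DeltaTemporality
+// reports only the change since the previous collection.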
+type Temporality uint8
+
+const (
+ // undefinedTemporality represents an unset Temporality.
+ //nolint:deadcode,unused,varcheck
+ undefinedTemporality Temporality = iota
+
+ // CumulativeTemporality defines a measurement interval that continues to
+ // expand forward in time from a starting point. New measurements are
+ // added to all previous measurements since a start time.
+ CumulativeTemporality
+
+ // DeltaTemporality defines a measurement interval that resets each cycle.
+ // Measurements from one cycle are recorded independently, measurements
+ // from other cycles do not affect them.
+ DeltaTemporality
+)
+
+// MarshalText returns the byte encoding of t.
+func (t Temporality) MarshalText() ([]byte, error) {
+ return []byte(t.String()), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
new file mode 100644
index 0000000000..4da833cdce
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
+
+package metricdata
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[undefinedTemporality-0]
+ _ = x[CumulativeTemporality-1]
+ _ = x[DeltaTemporality-2]
+}
+
+const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality"
+
+var _Temporality_index = [...]uint8{0, 20, 41, 57}
+
+func (i Temporality) String() string {
+ if i >= Temporality(len(_Temporality_index)-1) {
+ return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
new file mode 100644
index 0000000000..9cdd9384c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
@@ -0,0 +1,370 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Default periodic reader timing.
+const (
+ defaultTimeout = time.Millisecond * 30000
+ defaultInterval = time.Millisecond * 60000
+)
+
+// periodicReaderConfig contains configuration options for a PeriodicReader.
+type periodicReaderConfig struct {
+ interval time.Duration
+ timeout time.Duration
+ producers []Producer
+}
+
+// newPeriodicReaderConfig returns a periodicReaderConfig configured with
+// options.
+func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
+ c := periodicReaderConfig{
+ interval: envDuration(envInterval, defaultInterval),
+ timeout: envDuration(envTimeout, defaultTimeout),
+ }
+ for _, o := range options {
+ c = o.applyPeriodic(c)
+ }
+ return c
+}
+
+// PeriodicReaderOption applies a configuration option value to a PeriodicReader.
+type PeriodicReaderOption interface {
+ applyPeriodic(periodicReaderConfig) periodicReaderConfig
+}
+
+// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig.
+type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig
+
+// applyPeriodic returns a periodicReaderConfig with option(s) applied.
+func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig {
+ return o(conf)
+}
+
+// WithTimeout configures the time a PeriodicReader waits for an export to
+// complete before canceling it. This includes an export which occurs as part
+// of Shutdown or ForceFlush if the user passed context does not have a
+// deadline. If the user passed context does have a deadline, it will be used
+// instead.
+//
+// This option overrides any value set for the
+// OTEL_METRIC_EXPORT_TIMEOUT environment variable.
+//
+// If this option is not used or d is less than or equal to zero, 30 seconds
+// is used as the default.
+func WithTimeout(d time.Duration) PeriodicReaderOption {
+ return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
+ if d <= 0 {
+ return conf
+ }
+ conf.timeout = d
+ return conf
+ })
+}
+
+// WithInterval configures the intervening time between exports for a
+// PeriodicReader.
+//
+// This option overrides any value set for the
+// OTEL_METRIC_EXPORT_INTERVAL environment variable.
+//
+// If this option is not used or d is less than or equal to zero, 60 seconds
+// is used as the default.
+func WithInterval(d time.Duration) PeriodicReaderOption {
+ return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
+ if d <= 0 {
+ return conf
+ }
+ conf.interval = d
+ return conf
+ })
+}
+
+// NewPeriodicReader returns a Reader that collects and exports metric data to
+// the exporter at a defined interval. By default, the returned Reader will
+// collect and export data every 60 seconds, and will cancel any attempts that
+// exceed 30 seconds, collect and export combined. The collect and export time
+// are not counted towards the interval between attempts.
+//
+// The Collect method of the returned Reader continues to gather and return
+// metric data to the user. It will not automatically send that data to the
+// exporter. That is left to the user to accomplish.
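+//
+// A minimal usage sketch (exp is any Exporter implementation; MeterProvider
+// and WithReader are defined elsewhere in this package):
+//
+//	reader := NewPeriodicReader(exp,
+//		WithInterval(30*time.Second),
+//		WithTimeout(10*time.Second),
+//	)
+//	provider := NewMeterProvider(WithReader(reader))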
+func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader {
+ conf := newPeriodicReaderConfig(options)
+ ctx, cancel := context.WithCancel(context.Background())
+ r := &PeriodicReader{
+ interval: conf.interval,
+ timeout: conf.timeout,
+ exporter: exporter,
+ flushCh: make(chan chan error),
+ cancel: cancel,
+ done: make(chan struct{}),
+ rmPool: sync.Pool{
+ New: func() interface{} {
+ return &metricdata.ResourceMetrics{}
+ },
+ },
+ }
+ r.externalProducers.Store(conf.producers)
+
+ go func() {
+ defer func() { close(r.done) }()
+ r.run(ctx, conf.interval)
+ }()
+
+ return r
+}
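+
+// A hedged usage sketch: the returned Reader only produces data once it is
+// registered with a MeterProvider, e.g. via this package's WithReader option
+// (exp is a hypothetical Exporter):
+//
+//	provider := NewMeterProvider(WithReader(NewPeriodicReader(exp)))
+//	defer func() { _ = provider.Shutdown(context.Background()) }()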
+
+// PeriodicReader is a Reader that continuously collects and exports metric
+// data at a set interval.
+type PeriodicReader struct {
+ sdkProducer atomic.Value
+
+ mu sync.Mutex
+ isShutdown bool
+ externalProducers atomic.Value
+
+ interval time.Duration
+ timeout time.Duration
+ exporter Exporter
+ flushCh chan chan error
+
+ done chan struct{}
+ cancel context.CancelFunc
+ shutdownOnce sync.Once
+
+ rmPool sync.Pool
+}
+
+// Compile-time check that PeriodicReader implements Reader and is comparable.
+var _ = map[Reader]struct{}{&PeriodicReader{}: {}}
+
+// newTicker allows testing override.
+var newTicker = time.NewTicker
+
+// run continuously collects and exports metric data at the specified
+// interval. This will run until ctx is canceled or times out.
+func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) {
+ ticker := newTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ err := r.collectAndExport(ctx)
+ if err != nil {
+ otel.Handle(err)
+ }
+ case errCh := <-r.flushCh:
+ errCh <- r.collectAndExport(ctx)
+ ticker.Reset(interval)
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// register registers p as the producer of this reader.
+func (r *PeriodicReader) register(p sdkProducer) {
+ // Only register once. If producer is already set, do nothing.
+ if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+ msg := "did not register periodic reader"
+ global.Error(errDuplicateRegister, msg)
+ }
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality {
+ return r.exporter.Temporality(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
+ return r.exporter.Aggregation(kind)
+}
+
+// collectAndExport gathers all metric data related to the PeriodicReader r
+// from the SDK and exports it with r's exporter.
+func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
+ ctx, cancel := context.WithTimeout(ctx, r.timeout)
+ defer cancel()
+
+	// Reuse a ResourceMetrics from the pool rather than allocating a new one for every collection.
+ rm := r.rmPool.Get().(*metricdata.ResourceMetrics)
+ err := r.Collect(ctx, rm)
+ if err == nil {
+ err = r.export(ctx, rm)
+ }
+ r.rmPool.Put(rm)
+ return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm. The metric
+// data is not exported to the configured exporter; it is left to the caller
+// to handle that if desired.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if rm == nil {
+ return errors.New("periodic reader: *metricdata.ResourceMetrics is nil")
+ }
+ // TODO (#3047): When collect is updated to accept output as param, pass rm.
+ return r.collect(ctx, r.sdkProducer.Load(), rm)
+}
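+
+// A minimal sketch of an on-demand collection, assuming reader is a
+// registered *PeriodicReader and ctx is in scope:
+//
+//	var rm metricdata.ResourceMetrics
+//	if err := reader.Collect(ctx, &rm); err != nil {
+//		// Handle ErrReaderNotRegistered, ErrReaderShutdown, or a ctx error.
+//	}
+//	// rm now holds the gathered data; exporting it is left to the caller.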
+
+// collect unwraps p as a produceHolder and returns its produce results.
+func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
+ if p == nil {
+ return ErrReaderNotRegistered
+ }
+
+ ph, ok := p.(produceHolder)
+ if !ok {
+		// The atomic.Value is entirely in the PeriodicReader's control so
+		// this should never happen. In the unforeseen case that this does
+		// happen, return an error instead of panicking so a user's code does
+		// not halt in the process.
+ err := fmt.Errorf("periodic reader: invalid producer: %T", p)
+ return err
+ }
+
+ err := ph.produce(ctx, rm)
+ if err != nil {
+ return err
+ }
+ var errs []error
+ for _, producer := range r.externalProducers.Load().([]Producer) {
+ externalMetrics, err := producer.Produce(ctx)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+ }
+
+ global.Debug("PeriodicReader collection", "Data", rm)
+
+ return unifyErrors(errs)
+}
+
+// export exports metric data m using r's exporter.
+func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error {
+ return r.exporter.Export(ctx, m)
+}
+
+// ForceFlush flushes pending telemetry.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
+ // Prioritize the ctx timeout if it is set.
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, r.timeout)
+ defer cancel()
+ }
+
+ errCh := make(chan error, 1)
+ select {
+ case r.flushCh <- errCh:
+ select {
+ case err := <-errCh:
+ if err != nil {
+ return err
+ }
+ close(errCh)
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ case <-r.done:
+ return ErrReaderShutdown
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return r.exporter.ForceFlush(ctx)
+}
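+
+// A sketch of flushing with an explicit deadline; a deadline on ctx takes
+// precedence over the reader's configured timeout:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+//	defer cancel()
+//	if err := reader.ForceFlush(ctx); err != nil {
+//		// err may be ErrReaderShutdown or a context error.
+//	}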
+
+// Shutdown flushes pending telemetry and then stops the export pipeline.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Shutdown(ctx context.Context) error {
+ err := ErrReaderShutdown
+ r.shutdownOnce.Do(func() {
+ // Prioritize the ctx timeout if it is set.
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, r.timeout)
+ defer cancel()
+ }
+
+ // Stop the run loop.
+ r.cancel()
+ <-r.done
+
+ // Any future call to Collect will now return ErrReaderShutdown.
+ ph := r.sdkProducer.Swap(produceHolder{
+ produce: shutdownProducer{}.produce,
+ })
+
+ if ph != nil { // Reader was registered.
+ // Flush pending telemetry.
+ m := r.rmPool.Get().(*metricdata.ResourceMetrics)
+ err = r.collect(ctx, ph, m)
+ if err == nil {
+ err = r.export(ctx, m)
+ }
+ r.rmPool.Put(m)
+ }
+
+ sErr := r.exporter.Shutdown(ctx)
+ if err == nil || err == ErrReaderShutdown {
+ err = sErr
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.isShutdown = true
+ // release references to Producer(s)
+ r.externalProducers.Store([]Producer{})
+ })
+ return err
+}
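+
+// A sketch of the usual shutdown pattern; the sync.Once above makes repeated
+// calls return ErrReaderShutdown (otel.Handle is the global error handler):
+//
+//	defer func() {
+//		if err := reader.Shutdown(context.Background()); err != nil {
+//			otel.Handle(err)
+//		}
+//	}()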
+
+// MarshalLog returns logging data about the PeriodicReader.
+func (r *PeriodicReader) MarshalLog() interface{} {
+ r.mu.Lock()
+ down := r.isShutdown
+ r.mu.Unlock()
+ return struct {
+ Type string
+ Exporter Exporter
+ Registered bool
+ Shutdown bool
+ Interval time.Duration
+ Timeout time.Duration
+ }{
+ Type: "PeriodicReader",
+ Exporter: r.exporter,
+ Registered: r.sdkProducer.Load() != nil,
+ Shutdown: down,
+ Interval: r.interval,
+ Timeout: r.timeout,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
new file mode 100644
index 0000000000..c6f9597198
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -0,0 +1,655 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "container/list"
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric/internal"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+ "go.opentelemetry.io/otel/sdk/metric/internal/x"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+var (
+ errCreatingAggregators = errors.New("could not create all aggregators")
+ errIncompatibleAggregation = errors.New("incompatible aggregation")
+ errUnknownAggregation = errors.New("unrecognized aggregation")
+)
+
+// instrumentSync is a synchronization point between a pipeline and an
+// instrument's aggregate function.
+type instrumentSync struct {
+ name string
+ description string
+ unit string
+ compAgg aggregate.ComputeAggregation
+}
+
+func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline {
+ if res == nil {
+ res = resource.Empty()
+ }
+ return &pipeline{
+ resource: res,
+ reader: reader,
+ views: views,
+ // aggregations is lazy allocated when needed.
+ }
+}
+
+// pipeline connects all of the instruments created by a meter provider to a Reader.
+// This is the object that will be passed to `Reader.register()` when a meter
+// provider is created.
+//
+// As instruments are created, each should be checked against the views of the
+// Reader, and if a view matches, its aggregate function should be added to
+// the pipeline.
+type pipeline struct {
+ resource *resource.Resource
+
+ reader Reader
+ views []View
+
+ sync.Mutex
+ aggregations map[instrumentation.Scope][]instrumentSync
+ callbacks []func(context.Context) error
+ multiCallbacks list.List
+}
+
+// addSync adds the instrumentSync to pipeline p with scope. This method is not
+// idempotent. Duplicate calls will result in duplicate additions; it is the
+// caller's responsibility to ensure this is called with unique values.
+func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
+ p.Lock()
+ defer p.Unlock()
+ if p.aggregations == nil {
+ p.aggregations = map[instrumentation.Scope][]instrumentSync{
+ scope: {iSync},
+ }
+ return
+ }
+ p.aggregations[scope] = append(p.aggregations[scope], iSync)
+}
+
+type multiCallback func(context.Context) error
+
+// addMultiCallback registers a multi-instrument callback to be run when
+// `produce()` is called.
+func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
+ p.Lock()
+ defer p.Unlock()
+ e := p.multiCallbacks.PushBack(c)
+ return func() {
+ p.Lock()
+ p.multiCallbacks.Remove(e)
+ p.Unlock()
+ }
+}
+
+// produce returns aggregated metrics from a single collection.
+//
+// This method is safe to call concurrently.
+func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ p.Lock()
+ defer p.Unlock()
+
+ var errs multierror
+ for _, c := range p.callbacks {
+ // TODO make the callbacks parallel. ( #3034 )
+ if err := c(ctx); err != nil {
+ errs.append(err)
+ }
+ if err := ctx.Err(); err != nil {
+ rm.Resource = nil
+ rm.ScopeMetrics = rm.ScopeMetrics[:0]
+ return err
+ }
+ }
+ for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
+ // TODO make the callbacks parallel. ( #3034 )
+ f := e.Value.(multiCallback)
+ if err := f(ctx); err != nil {
+ errs.append(err)
+ }
+ if err := ctx.Err(); err != nil {
+ // This means the context expired before we finished running callbacks.
+ rm.Resource = nil
+ rm.ScopeMetrics = rm.ScopeMetrics[:0]
+ return err
+ }
+ }
+
+ rm.Resource = p.resource
+ rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations))
+
+ i := 0
+ for scope, instruments := range p.aggregations {
+ rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments))
+ j := 0
+ for _, inst := range instruments {
+ data := rm.ScopeMetrics[i].Metrics[j].Data
+ if n := inst.compAgg(&data); n > 0 {
+ rm.ScopeMetrics[i].Metrics[j].Name = inst.name
+ rm.ScopeMetrics[i].Metrics[j].Description = inst.description
+ rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit
+ rm.ScopeMetrics[i].Metrics[j].Data = data
+ j++
+ }
+ }
+ rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j]
+ if len(rm.ScopeMetrics[i].Metrics) > 0 {
+ rm.ScopeMetrics[i].Scope = scope
+ i++
+ }
+ }
+
+ rm.ScopeMetrics = rm.ScopeMetrics[:i]
+
+ return errs.errorOrNil()
+}
+
+// inserter facilitates inserting of new instruments from a single scope into a
+// pipeline.
+type inserter[N int64 | float64] struct {
+ // aggregators is a cache that holds aggregate function inputs whose
+ // outputs have been inserted into the underlying reader pipeline. This
+ // cache ensures no duplicate aggregate functions are inserted into the
+	// reader pipeline, and if a new request during instrument creation asks
+	// for the same aggregate function input, the same instance is returned.
+ aggregators *cache[instID, aggVal[N]]
+
+ // views is a cache that holds instrument identifiers for all the
+ // instruments a Meter has created, it is provided from the Meter that owns
+	// instruments a Meter has created; it is provided by the Meter that owns
+ // with the same name but different options (e.g. description, unit) a
+ // warning message is logged.
+ views *cache[string, instID]
+
+ pipeline *pipeline
+}
+
+func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] {
+ if vc == nil {
+ vc = &cache[string, instID]{}
+ }
+ return &inserter[N]{
+ aggregators: &cache[instID, aggVal[N]]{},
+ views: vc,
+ pipeline: p,
+ }
+}
+
+// Instrument inserts the instrument inst with instUnit into a pipeline. All
+// views the pipeline contains are matched against, and any matching view that
+// creates a unique aggregate function will have its output inserted into the
+// pipeline and its input included in the returned slice.
+//
+// The returned aggregate function inputs are ensured to be deduplicated and
+// unique. If another view in another pipeline that is cached by this
+// inserter's cache has already inserted the same aggregate function for the
+// same instrument, that function's input instance is returned.
+//
+// If another instrument has already been inserted by this inserter, or any
+// other using the same cache, and it conflicts with the instrument being
+// inserted in this call, an aggregate function input matching the arguments
+// will still be returned but an Info level log message will also be logged to
+// the OTel global logger.
+//
+// If the passed instrument would result in an incompatible aggregate function,
+// an error is returned and that aggregate function output is not inserted nor
+// is its input returned.
+//
+// If an instrument is determined to use a Drop aggregation, that instrument is
+// not inserted nor returned.
+func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) {
+ var (
+ matched bool
+ measures []aggregate.Measure[N]
+ )
+
+ errs := &multierror{wrapped: errCreatingAggregators}
+ seen := make(map[uint64]struct{})
+ for _, v := range i.pipeline.views {
+ stream, match := v(inst)
+ if !match {
+ continue
+ }
+ matched = true
+ in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+ if err != nil {
+ errs.append(err)
+ }
+ if in == nil { // Drop aggregation.
+ continue
+ }
+ if _, ok := seen[id]; ok {
+ // This aggregate function has already been added.
+ continue
+ }
+ seen[id] = struct{}{}
+ measures = append(measures, in)
+ }
+
+ if matched {
+ return measures, errs.errorOrNil()
+ }
+
+	// Apply the implicit default view if no explicit view matched.
+ stream := Stream{
+ Name: inst.Name,
+ Description: inst.Description,
+ Unit: inst.Unit,
+ }
+ in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+ if err != nil {
+ errs.append(err)
+ }
+ if in != nil {
+ // Ensured to have not seen given matched was false.
+ measures = append(measures, in)
+ }
+ return measures, errs.errorOrNil()
+}
+
+// addCallback registers a single instrument callback to be run when
+// `produce()` is called.
+func (i *inserter[N]) addCallback(cback func(context.Context) error) {
+ i.pipeline.Lock()
+ defer i.pipeline.Unlock()
+ i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
+}
+
+var aggIDCount uint64
+
+// aggVal is the cached value in an aggregators cache.
+type aggVal[N int64 | float64] struct {
+ ID uint64
+ Measure aggregate.Measure[N]
+ Err error
+}
+
+// readerDefaultAggregation returns the default aggregation for the instrument
+// kind based on the reader's aggregation preferences. This is used unless the
+// aggregation is overridden with a view.
+func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation {
+ aggregation := i.pipeline.reader.aggregation(kind)
+ switch aggregation.(type) {
+ case nil, AggregationDefault:
+ // If the reader returns default or nil use the default selector.
+ aggregation = DefaultAggregationSelector(kind)
+ default:
+ // Deep copy and validate before using.
+ aggregation = aggregation.copy()
+ if err := aggregation.err(); err != nil {
+ orig := aggregation
+ aggregation = DefaultAggregationSelector(kind)
+ global.Error(
+ err, "using default aggregation instead",
+ "aggregation", orig,
+ "replacement", aggregation,
+ )
+ }
+ }
+ return aggregation
+}
+
+// cachedAggregator returns the appropriate aggregate input and output
+// functions for an instrument configuration. If the exact instrument has been
+// created within the inst.Scope, those aggregate function instances will be
+// returned. Otherwise, new computed aggregate functions will be cached and
+// returned.
+//
+// If the instrument configuration conflicts with an instrument that has
+// already been created (e.g. description, unit, data type) a warning will be
+// logged at the "Info" level with the global OTel logger. Valid new aggregate
+// functions for the instrument configuration will still be returned without an
+// error.
+//
+// If the instrument defines an unknown or incompatible aggregation, an error
+// is returned.
+func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) {
+ switch stream.Aggregation.(type) {
+ case nil:
+ // The aggregation was not overridden with a view. Use the aggregation
+ // provided by the reader.
+ stream.Aggregation = readerAggregation
+ case AggregationDefault:
+ // The view explicitly requested the default aggregation.
+ stream.Aggregation = DefaultAggregationSelector(kind)
+ }
+
+ if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
+ return nil, 0, fmt.Errorf(
+ "creating aggregator with instrumentKind: %d, aggregation %v: %w",
+ kind, stream.Aggregation, err,
+ )
+ }
+
+ id := i.instID(kind, stream)
+ // If there is a conflict, the specification says the view should
+ // still be applied and a warning should be logged.
+ i.logConflict(id)
+
+ // If there are requests for the same instrument with different name
+	// casing, the first-seen needs to be returned. Use a normalized ID for the
+ // cache lookup to ensure the correct comparison.
+ normID := id.normalize()
+ cv := i.aggregators.Lookup(normID, func() aggVal[N] {
+ b := aggregate.Builder[N]{
+ Temporality: i.pipeline.reader.temporality(kind),
+ ReservoirFunc: reservoirFunc(stream.Aggregation),
+ }
+ b.Filter = stream.AttributeFilter
+ // A value less than or equal to zero will disable the aggregation
+		// limits for the builder (and all the created aggregates).
+ // CardinalityLimit.Lookup returns 0 by default if unset (or
+ // unrecognized input). Use that value directly.
+ b.AggregationLimit, _ = x.CardinalityLimit.Lookup()
+
+ in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
+ if err != nil {
+ return aggVal[N]{0, nil, err}
+ }
+ if in == nil { // Drop aggregator.
+ return aggVal[N]{0, nil, nil}
+ }
+ i.pipeline.addSync(scope, instrumentSync{
+ // Use the first-seen name casing for this and all subsequent
+ // requests of this instrument.
+ name: stream.Name,
+ description: stream.Description,
+ unit: stream.Unit,
+ compAgg: out,
+ })
+ id := atomic.AddUint64(&aggIDCount, 1)
+ return aggVal[N]{id, in, err}
+ })
+ return cv.Measure, cv.ID, cv.Err
+}
+
+// logConflict validates if an instrument with the same case-insensitive name
+// as id has already been created. If that instrument conflicts with id, a
+// warning is logged.
+func (i *inserter[N]) logConflict(id instID) {
+ // The API specification defines names as case-insensitive. If there is a
+ // different casing of a name it needs to be a conflict.
+ name := id.normalize().Name
+ existing := i.views.Lookup(name, func() instID { return id })
+ if id == existing {
+ return
+ }
+
+ const msg = "duplicate metric stream definitions"
+ args := []interface{}{
+ "names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
+ "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
+ "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind),
+ "units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit),
+ "numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number),
+ }
+
+ // The specification recommends logging a suggested view to resolve
+ // conflicts if possible.
+ //
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration
+ if id.Unit != existing.Unit || id.Number != existing.Number {
+ // There is no view resolution for these, don't make a suggestion.
+ global.Warn(msg, args...)
+ return
+ }
+
+ var stream string
+ if id.Name != existing.Name || id.Kind != existing.Kind {
+ stream = `Stream{Name: "{{NEW_NAME}}"}`
+ } else if id.Description != existing.Description {
+ stream = fmt.Sprintf("Stream{Description: %q}", existing.Description)
+ }
+
+ inst := fmt.Sprintf(
+ "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+ id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+ )
+ args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream))
+
+ global.Warn(msg, args...)
+}
+
+func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
+ var zero N
+ return instID{
+ Name: stream.Name,
+ Description: stream.Description,
+ Unit: stream.Unit,
+ Kind: kind,
+ Number: fmt.Sprintf("%T", zero),
+ }
+}
+
+// aggregateFunc returns new aggregate functions matching agg and kind (the
+// monotonicity is derived from kind). If the agg is unknown or the
+// temporality is invalid, an error is returned.
+func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) {
+ switch a := agg.(type) {
+ case AggregationDefault:
+ return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind)
+ case AggregationDrop:
+ // Return nil in and out to signify the drop aggregator.
+ case AggregationLastValue:
+ switch kind {
+ case InstrumentKindGauge:
+ meas, comp = b.LastValue()
+ case InstrumentKindObservableGauge:
+ meas, comp = b.PrecomputedLastValue()
+ }
+ case AggregationSum:
+ switch kind {
+ case InstrumentKindObservableCounter:
+ meas, comp = b.PrecomputedSum(true)
+ case InstrumentKindObservableUpDownCounter:
+ meas, comp = b.PrecomputedSum(false)
+ case InstrumentKindCounter, InstrumentKindHistogram:
+ meas, comp = b.Sum(true)
+ default:
+ // InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and
+ // instrumentKindUndefined or other invalid instrument kinds.
+ meas, comp = b.Sum(false)
+ }
+ case AggregationExplicitBucketHistogram:
+ var noSum bool
+ switch kind {
+ case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
+ // The sum should not be collected for any instrument that can make
+ // negative measurements:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+ noSum = true
+ }
+ meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum)
+ case AggregationBase2ExponentialHistogram:
+ var noSum bool
+ switch kind {
+ case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
+ // The sum should not be collected for any instrument that can make
+ // negative measurements:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+ noSum = true
+ }
+ meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum)
+
+ default:
+ err = errUnknownAggregation
+ }
+
+ return meas, comp, err
+}
+
+// isAggregatorCompatible checks if the aggregation can be used by the instrument.
+// Current compatibility:
+//
+// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram |
+// |--------------------------|------|-----------|-----|-----------|-----------------------|
+// | Counter | ✓ | | ✓ | ✓ | ✓ |
+// | UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
+// | Histogram | ✓ | | ✓ | ✓ | ✓ |
+// | Gauge | ✓ | ✓ | | ✓ | ✓ |
+// | Observable Counter | ✓ | | ✓ | ✓ | ✓ |
+// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
+// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |
+func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
+ switch agg.(type) {
+ case AggregationDefault:
+ return nil
+ case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram:
+ switch kind {
+ case InstrumentKindCounter,
+ InstrumentKindUpDownCounter,
+ InstrumentKindHistogram,
+ InstrumentKindGauge,
+ InstrumentKindObservableCounter,
+ InstrumentKindObservableUpDownCounter,
+ InstrumentKindObservableGauge:
+ return nil
+ default:
+ return errIncompatibleAggregation
+ }
+ case AggregationSum:
+ switch kind {
+ case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter:
+ return nil
+ default:
+ // TODO: review need for aggregation check after
+ // https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+ return errIncompatibleAggregation
+ }
+ case AggregationLastValue:
+ switch kind {
+ case InstrumentKindObservableGauge, InstrumentKindGauge:
+ return nil
+ }
+ // TODO: review need for aggregation check after
+ // https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+ return errIncompatibleAggregation
+ case AggregationDrop:
+ return nil
+ default:
+		// This is reached after the check for AggregationDefault above; an unknown aggregation here is an error.
+ return fmt.Errorf("%w: %v", errUnknownAggregation, agg)
+ }
+}
+
+// pipelines is the group of pipelines connecting Readers with instrument
+// measurement.
+type pipelines []*pipeline
+
+func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines {
+ pipes := make([]*pipeline, 0, len(readers))
+ for _, r := range readers {
+ p := newPipeline(res, r, views)
+ r.register(p)
+ pipes = append(pipes, p)
+ }
+ return pipes
+}
+
+func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration {
+ unregs := make([]func(), len(p))
+ for i, pipe := range p {
+ unregs[i] = pipe.addMultiCallback(c)
+ }
+ return unregisterFuncs{f: unregs}
+}
+
+type unregisterFuncs struct {
+ embedded.Registration
+ f []func()
+}
+
+func (u unregisterFuncs) Unregister() error {
+ for _, f := range u.f {
+ f()
+ }
+ return nil
+}
+
+// resolver facilitates resolving the aggregate functions an instrument uses
+// to aggregate measurements, while updating all pipelines that need to pull
+// from those aggregations.
+type resolver[N int64 | float64] struct {
+ inserters []*inserter[N]
+}
+
+func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] {
+ in := make([]*inserter[N], len(p))
+ for i := range in {
+ in[i] = newInserter[N](p[i], vc)
+ }
+ return resolver[N]{in}
+}
+
+// Aggregators returns the Aggregators that must be updated by the instrument
+// defined by key.
+func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) {
+ var measures []aggregate.Measure[N]
+
+ errs := &multierror{}
+ for _, i := range r.inserters {
+ in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ errs.append(err)
+ }
+ measures = append(measures, in...)
+ }
+ return measures, errs.errorOrNil()
+}
+
+// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument
+// defined by id. If boundaries were provided on instrument instantiation, those take precedence
+// over boundaries provided by the reader.
+func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) {
+ var measures []aggregate.Measure[N]
+
+ errs := &multierror{}
+ for _, i := range r.inserters {
+ agg := i.readerDefaultAggregation(id.Kind)
+ if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 {
+ histAgg.Boundaries = boundaries
+ agg = histAgg
+ }
+ in, err := i.Instrument(id, agg)
+ if err != nil {
+ errs.append(err)
+ }
+ measures = append(measures, in...)
+ }
+ return measures, errs.errorOrNil()
+}
+
+type multierror struct {
+ wrapped error
+ errors []string
+}
+
+func (m *multierror) errorOrNil() error {
+ if len(m.errors) == 0 {
+ return nil
+ }
+ if m.wrapped == nil {
+ return errors.New(strings.Join(m.errors, "; "))
+ }
+ return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; "))
+}
+
+func (m *multierror) append(err error) {
+ m.errors = append(m.errors, err.Error())
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
new file mode 100644
index 0000000000..a82af538e6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/metric/noop"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+)
+
+// MeterProvider handles the creation and coordination of Meters. All Meters
+// created by a MeterProvider will be associated with the same Resource, have
+// the same Views applied to them, and have their produced metric telemetry
+// passed to the configured Readers.
+type MeterProvider struct {
+ embedded.MeterProvider
+
+ pipes pipelines
+ meters cache[instrumentation.Scope, *meter]
+
+ forceFlush, shutdown func(context.Context) error
+ stopped atomic.Bool
+}
+
+// Compile-time check MeterProvider implements metric.MeterProvider.
+var _ metric.MeterProvider = (*MeterProvider)(nil)
+
+// NewMeterProvider returns a new and configured MeterProvider.
+//
+// By default, the returned MeterProvider is configured with the default
+// Resource and no Readers. Readers cannot be added after a MeterProvider is
+// created. This means a MeterProvider created without any Readers will
+// perform no operations.
+func NewMeterProvider(options ...Option) *MeterProvider {
+ conf := newConfig(options)
+ flush, sdown := conf.readerSignals()
+
+ mp := &MeterProvider{
+ pipes: newPipelines(conf.res, conf.readers, conf.views),
+ forceFlush: flush,
+ shutdown: sdown,
+ }
+ // Log after creation so all readers show correctly they are registered.
+ global.Info("MeterProvider created",
+ "Resource", conf.res,
+ "Readers", conf.readers,
+ "Views", len(conf.views),
+ )
+ return mp
+}
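+
+// A minimal construction sketch; exp is a hypothetical Exporter, and the
+// Reader must be supplied here because none can be added later. WithResource
+// is this package's Option; otel.SetMeterProvider installs the global:
+//
+//	mp := NewMeterProvider(
+//		WithResource(resource.Default()),
+//		WithReader(NewPeriodicReader(exp)),
+//	)
+//	otel.SetMeterProvider(mp)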
+
+// Meter returns a Meter with the given name and configured with options.
+//
+// The name should be the name of the instrumentation scope creating
+// telemetry. This name may be the same as the instrumented code only if that
+// code provides built-in instrumentation.
+//
+// Calls to the Meter method after Shutdown has been called will return Meters
+// that perform no operations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
+ if name == "" {
+ global.Warn("Invalid Meter name.", "name", name)
+ }
+
+ if mp.stopped.Load() {
+ return noop.Meter{}
+ }
+
+ c := metric.NewMeterConfig(options...)
+ s := instrumentation.Scope{
+ Name: name,
+ Version: c.InstrumentationVersion(),
+ SchemaURL: c.SchemaURL(),
+ }
+
+ global.Info("Meter created",
+ "Name", s.Name,
+ "Version", s.Version,
+ "SchemaURL", s.SchemaURL,
+ )
+
+ return mp.meters.Lookup(s, func() *meter {
+ return newMeter(s, mp.pipes)
+ })
+}
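+
+// A sketch of obtaining a Meter and an instrument from it, where metric is
+// the API package go.opentelemetry.io/otel/metric imported above and ctx is
+// assumed in scope:
+//
+//	meter := mp.Meter("example.com/instrumentation",
+//		metric.WithInstrumentationVersion("v0.1.0"),
+//	)
+//	counter, err := meter.Int64Counter("requests")
+//	if err == nil {
+//		counter.Add(ctx, 1)
+//	}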
+
+// ForceFlush flushes all pending telemetry.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources will have been released in these
+// situations.
+//
+// ForceFlush calls ForceFlush(context.Context) error
+// on all Readers that implement this method.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
+ if mp.forceFlush != nil {
+ return mp.forceFlush(ctx)
+ }
+ return nil
+}
+
+// Shutdown shuts down the MeterProvider flushing all pending telemetry and
+// releasing any held computational resources.
+//
+// This call is idempotent. The first call will perform all flush and
+// releasing operations. Subsequent calls will perform no action and will
+// return an error stating this.
+//
+// Measurements made by instruments from meters this MeterProvider created
+// will not be exported after Shutdown is called.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources will have been released in these
+// situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Shutdown(ctx context.Context) error {
+ // Even though it may seem like there is a synchronization issue between the
+ // call to `Store` and checking `shutdown`, the Go concurrency model ensures
+ // that is not the case, as all the atomic operations executed in a program
+ // behave as though executed in some sequentially consistent order. This
+ // definition provides the same semantics as C++'s sequentially consistent
+ // atomics and Java's volatile variables.
+ // See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
+
+ mp.stopped.Store(true)
+ if mp.shutdown != nil {
+ return mp.shutdown(ctx)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
new file mode 100644
index 0000000000..a55f9a5372
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// errDuplicateRegister is logged by a Reader when an attempt to register it
+// more than once occurs.
+var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
+
+// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
+// the reader is registered with a MeterProvider.
+var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
+
+// ErrReaderShutdown is returned if Collect or Shutdown are called after a
+// reader has been Shutdown once.
+var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
+
+// errNonPositiveDuration is logged when an environment variable
+// has a non-positive value.
+var errNonPositiveDuration = fmt.Errorf("non-positive duration")
+
+// Reader is the interface used between the SDK and an
+// exporter. Control flow is bi-directional through the
+// Reader, since the SDK initiates ForceFlush and Shutdown
+// while the exporter initiates collection. The register() method here
+// informs the Reader that it can begin reading, signaling the
+// start of bi-directional control flow.
+//
+// Typically, push-based exporters that are periodic will
+// implement PeriodicExporter themselves and construct a
+// PeriodicReader to satisfy this interface.
+//
+// Pull-based exporters will typically implement Register
+// themselves, since they read on demand.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Reader interface {
+ // register registers a Reader with a MeterProvider.
+ // The producer argument allows the Reader to signal the sdk to collect
+ // and send aggregated metric measurements.
+ register(sdkProducer)
+
+ // temporality reports the Temporality for the instrument kind provided.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Reader methods.
+ temporality(InstrumentKind) metricdata.Temporality
+
+ // aggregation returns what Aggregation to use for an instrument kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Reader methods.
+ aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type.
+
+ // Collect gathers and returns all metric data related to the Reader from
+ // the SDK and stores it in out. An error is returned if this is called
+ // after Shutdown or if out is nil.
+ //
+ // This method needs to be concurrent safe, and the cancellation of the
+ // passed context is expected to be honored.
+ Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown flushes all metric measurements held in an export pipeline and releases any
+ // held computational resources.
+ //
+	// The deadline or cancellation of the passed context is honored. An appropriate
+	// error will be returned in these situations. There is no guarantee that all
+	// telemetry will be flushed or all resources will have been released in these
+ // situations.
+ //
+ // After Shutdown is called, calls to Collect will perform no operation and instead will return
+ // an error indicating the shutdown state.
+ //
+ // This method needs to be concurrent safe.
+ Shutdown(context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
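+
+// A pull-based sketch using this package's ManualReader, which satisfies
+// Reader and only collects when asked:
+//
+//	reader := NewManualReader()
+//	mp := NewMeterProvider(WithReader(reader))
+//	_ = mp // create instruments and record measurements...
+//
+//	var rm metricdata.ResourceMetrics
+//	_ = reader.Collect(context.Background(), &rm)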
+
+// sdkProducer produces metrics for a Reader.
+type sdkProducer interface {
+ // produce returns aggregated metrics from a single collection.
+ //
+ // This method is safe to call concurrently.
+ produce(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// Producer produces metrics for a Reader from an external source.
+type Producer interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Produce returns aggregated metrics from an external source.
+ //
+ // This method should be safe to call concurrently.
+ Produce(context.Context) ([]metricdata.ScopeMetrics, error)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// produceHolder is used as an atomic.Value to wrap the non-concrete producer
+// type.
+type produceHolder struct {
+ produce func(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// shutdownProducer always produces an ErrReaderShutdown error.
+type shutdownProducer struct{}
+
+// produce returns an ErrReaderShutdown error.
+func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
+ return ErrReaderShutdown
+}
+
+// TemporalitySelector selects the temporality to use based on the InstrumentKind.
+type TemporalitySelector func(InstrumentKind) metricdata.Temporality
+
+// DefaultTemporalitySelector is the default TemporalitySelector used if
+// WithTemporalitySelector is not provided. CumulativeTemporality will be used
+// for all instrument kinds if this TemporalitySelector is used.
+func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+}
+
+// AggregationSelector selects the aggregation and the parameters to use for
+// that aggregation based on the InstrumentKind.
+//
+// If the Aggregation returned is nil or DefaultAggregation, the selection from
+// DefaultAggregationSelector will be used.
+type AggregationSelector func(InstrumentKind) Aggregation
+
+// DefaultAggregationSelector returns the default aggregation and parameters
+// that will be used to summarize measurements made from an instrument of
+// InstrumentKind. This AggregationSelector uses the following selection
+// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum,
+// Observable UpDownCounter ⇨ Sum, Gauge ⇨ LastValue,
+// Observable Gauge ⇨ LastValue, Histogram ⇨ ExplicitBucketHistogram.
+func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
+ switch ik {
+ case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter:
+ return AggregationSum{}
+ case InstrumentKindObservableGauge, InstrumentKindGauge:
+ return AggregationLastValue{}
+ case InstrumentKindHistogram:
+ return AggregationExplicitBucketHistogram{
+ Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+ NoMinMax: false,
+ }
+ }
+ panic("unknown instrument kind")
+}
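+
+// A sketch of a custom AggregationSelector that overrides histograms only and
+// defers to the default mapping otherwise (usable, for example, with the
+// manual reader's WithAggregationSelector option):
+//
+//	selector := func(ik InstrumentKind) Aggregation {
+//		if ik == InstrumentKindHistogram {
+//			return AggregationBase2ExponentialHistogram{MaxSize: 160, MaxScale: 20}
+//		}
+//		return DefaultAggregationSelector(ik)
+//	}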
+
+// ReaderOption is an option which can be applied to manual or Periodic
+// readers.
+type ReaderOption interface {
+ PeriodicReaderOption
+ ManualReaderOption
+}
+
+// WithProducer registers p as an external Producer of metric data for this
+// Reader.
+func WithProducer(p Producer) ReaderOption {
+ return producerOption{p: p}
+}
+
+type producerOption struct {
+ p Producer
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig {
+ c.producers = append(c.producers, o.p)
+ return c
+}
+
+// applyPeriodic returns a periodicReaderConfig with option applied.
+func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
+ c.producers = append(c.producers, o.p)
+ return c
+}
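+
+// A sketch of bridging metrics from an external source; the bridge type and
+// its data source are hypothetical, and exp is some Exporter value:
+//
+//	type bridge struct{}
+//
+//	func (bridge) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
+//		return nil, nil // return externally gathered ScopeMetrics here
+//	}
+//
+//	reader := NewPeriodicReader(exp, WithProducer(bridge{}))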
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
new file mode 100644
index 0000000000..43f85cfbcf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+// version is the current release version of the metric SDK in use.
+func version() string {
+ return "1.27.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
new file mode 100644
index 0000000000..11e334319d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
@@ -0,0 +1,117 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "errors"
+ "regexp"
+ "strings"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+ errMultiInst = errors.New("name replacement for multiple instruments")
+ errEmptyView = errors.New("no criteria provided for view")
+
+ emptyView = func(Instrument) (Stream, bool) { return Stream{}, false }
+)
+
+// View is an override to the default behavior of the SDK. It defines how data
+// should be collected for certain instruments. It returns true and the exact
+// Stream to use for matching Instruments. Otherwise, if the view does not
+// match, false is returned.
+type View func(Instrument) (Stream, bool)
+
+// NewView returns a View that applies the Stream mask for all instruments that
+// match criteria. The returned View will only apply mask if all non-zero-value
+// fields of criteria match the corresponding Instrument passed to the view. If
+// no criteria are provided, that is, all fields of criteria are their
+// zero-values, a view that matches no instruments is returned. If you need to
+// match a zero-value field, create a View directly.
+//
+// The Name field of criteria supports wildcard pattern matching. The "*"
+// wildcard is recognized as matching zero or more characters, and "?" is
+// recognized as matching exactly one character. For example, a pattern of "*"
+// matches all instrument names.
+//
+// The Stream mask only applies updates for non-zero-value fields. By default,
+// the Instrument the View matches against will be used for the Name,
+// Description, and Unit of the returned Stream, and no Aggregation or
+// AttributeFilter is set. All non-zero-value fields of mask are used instead
+// of the default. If you need to zero out a Stream field returned from a
+// View, create a View directly.
+func NewView(criteria Instrument, mask Stream) View {
+ if criteria.empty() {
+ global.Error(
+ errEmptyView, "dropping view",
+ "mask", mask,
+ )
+ return emptyView
+ }
+
+ var matchFunc func(Instrument) bool
+ if strings.ContainsAny(criteria.Name, "*?") {
+ if mask.Name != "" {
+ global.Error(
+ errMultiInst, "dropping view",
+ "criteria", criteria,
+ "mask", mask,
+ )
+ return emptyView
+ }
+
+ // Handle branching here in NewView instead of criteria.matches so
+ // criteria.matches remains inlinable for the simple case.
+ pattern := regexp.QuoteMeta(criteria.Name)
+ pattern = "^" + pattern + "$"
+ pattern = strings.ReplaceAll(pattern, `\?`, ".")
+ pattern = strings.ReplaceAll(pattern, `\*`, ".*")
+ re := regexp.MustCompile(pattern)
+ matchFunc = func(i Instrument) bool {
+ return re.MatchString(i.Name) &&
+ criteria.matchesDescription(i) &&
+ criteria.matchesKind(i) &&
+ criteria.matchesUnit(i) &&
+ criteria.matchesScope(i)
+ }
+ } else {
+ matchFunc = criteria.matches
+ }
+
+ var agg Aggregation
+ if mask.Aggregation != nil {
+ agg = mask.Aggregation.copy()
+ if err := agg.err(); err != nil {
+ global.Error(
+ err, "not using aggregation with view",
+ "criteria", criteria,
+ "mask", mask,
+ )
+ agg = nil
+ }
+ }
+
+ return func(i Instrument) (Stream, bool) {
+ if matchFunc(i) {
+ return Stream{
+ Name: nonZero(mask.Name, i.Name),
+ Description: nonZero(mask.Description, i.Description),
+ Unit: nonZero(mask.Unit, i.Unit),
+ Aggregation: agg,
+ AttributeFilter: mask.AttributeFilter,
+ }, true
+ }
+ return Stream{}, false
+ }
+}
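+
+// A sketch of a wildcard view that swaps the aggregation for every
+// "*.duration" instrument; the name pattern and boundaries are illustrative,
+// and reader is a previously constructed Reader:
+//
+//	view := NewView(
+//		Instrument{Name: "*.duration"},
+//		Stream{Aggregation: AggregationExplicitBucketHistogram{
+//			Boundaries: []float64{0, 10, 100, 1000},
+//		}},
+//	)
+//	mp := NewMeterProvider(WithReader(reader), WithView(view))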
+
+// nonZero returns v if it is non-zero-valued, otherwise alt.
+func nonZero[T comparable](v, alt T) T {
+ var zero T
+ if v != zero {
+ return v
+ }
+ return alt
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
new file mode 100644
index 0000000000..4ad864d716
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
@@ -0,0 +1,3 @@
+# SDK Resource
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
new file mode 100644
index 0000000000..95a61d61d4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -0,0 +1,118 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// ErrPartialResource is returned by a detector when complete source
+// information for a Resource is unavailable or the source information
+// contains invalid values that are omitted from the returned Resource.
+var ErrPartialResource = errors.New("partial resource")
+
+// Detector detects OpenTelemetry resource information.
+type Detector interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Detect returns an initialized Resource based on gathered information.
+ // If the source information to construct a Resource contains invalid
+ // values, a Resource is returned with the valid parts of the source
+ // information used for initialization along with an appropriately
+ // wrapped ErrPartialResource error.
+ Detect(ctx context.Context) (*Resource, error)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// Detect returns a new [Resource] merged from all the Resources each of the
+// detectors produces. Each of the detectors is called sequentially, in the
+// order they are passed, merging the produced resource into the previous.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if that error is returned from a detector. It may also
+// return a merge-conflicting Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from different detectors results
+// in a schema URL conflict. It is up to the caller to determine if this
+// returned Resource should be used or not.
+//
+// If one of the detectors returns an error that is not [ErrPartialResource],
+// the resource produced by the detector will not be merged and the returned
+// error will wrap that detector's error.
+func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
+ r := new(Resource)
+ return r, detect(ctx, r, detectors)
+}
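+
+// A caller-side sketch of tolerating partial results; myDetector is
+// hypothetical:
+//
+//	res, err := resource.Detect(ctx, myDetector{})
+//	if err != nil && !errors.Is(err, resource.ErrPartialResource) {
+//		return err
+//	}
+//	// res is still usable when only a partial-resource error was reported.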
+
+// detect runs all detectors using ctx and merges the result into res. This
+// assumes res is allocated and not nil, it will panic otherwise.
+//
+// If the detectors or the merging of resources produce any errors (e.g.
+// [ErrPartialResource], [ErrSchemaURLConflict]), a single error wrapping all
+// of these errors will be returned. Otherwise, nil is returned.
+func detect(ctx context.Context, res *Resource, detectors []Detector) error {
+ var (
+ r *Resource
+ errs detectErrs
+ err error
+ )
+
+ for _, detector := range detectors {
+ if detector == nil {
+ continue
+ }
+ r, err = detector.Detect(ctx)
+ if err != nil {
+ errs = append(errs, err)
+ if !errors.Is(err, ErrPartialResource) {
+ continue
+ }
+ }
+ r, err = Merge(res, r)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ *res = *r
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+ if errors.Is(errs, ErrSchemaURLConflict) {
+ // If there has been a merge conflict, ensure the resource has no
+ // schema URL.
+ res.schemaURL = ""
+ }
+ return errs
+}
+
+type detectErrs []error
+
+func (e detectErrs) Error() string {
+ errStr := make([]string, len(e))
+ for i, err := range e {
+ errStr[i] = fmt.Sprintf("* %s", err)
+ }
+
+ format := "%d errors occurred detecting resource:\n\t%s"
+ return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t"))
+}
+
+func (e detectErrs) Unwrap() error {
+ switch len(e) {
+ case 0:
+ return nil
+ case 1:
+ return e[0]
+ }
+ return e[1:]
+}
+
+func (e detectErrs) Is(target error) bool {
+ return len(e) != 0 && errors.Is(e[0], target)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
new file mode 100644
index 0000000000..50d2df5eb4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -0,0 +1,97 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk"
+ semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+)
+
+type (
+ // telemetrySDK is a Detector that provides information about
+ // the OpenTelemetry SDK used. This Detector is included as a
+ // builtin. If these resource attributes are not wanted, use
+ // the WithTelemetrySDK(nil) or WithoutBuiltin() options to
+ // explicitly disable them.
+ telemetrySDK struct{}
+
+ // host is a Detector that provides information about the host
+ // being run on. This Detector is included as a builtin. If
+ // these resource attributes are not wanted, use the
+ // WithHost(nil) or WithoutBuiltin() options to explicitly
+ // disable them.
+ host struct{}
+
+ stringDetector struct {
+ schemaURL string
+ K attribute.Key
+ F func() (string, error)
+ }
+
+ defaultServiceNameDetector struct{}
+)
+
+var (
+ _ Detector = telemetrySDK{}
+ _ Detector = host{}
+ _ Detector = stringDetector{}
+ _ Detector = defaultServiceNameDetector{}
+)
+
+// Detect returns a *Resource that describes the OpenTelemetry SDK used.
+func (telemetrySDK) Detect(context.Context) (*Resource, error) {
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.TelemetrySDKName("opentelemetry"),
+ semconv.TelemetrySDKLanguageGo,
+ semconv.TelemetrySDKVersion(sdk.Version()),
+ ), nil
+}
+
+// Detect returns a *Resource that describes the host being run on.
+func (host) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx)
+}
+
+// StringDetector returns a Detector that will produce a *Resource
+// containing the string as a value corresponding to k. The resulting Resource
+// will have the specified schemaURL.
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector {
+ return stringDetector{schemaURL: schemaURL, K: k, F: f}
+}
+
+// Detect returns a *Resource that describes the string as a value
+// corresponding to attribute.Key as well as the specific schemaURL.
+func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
+ value, err := sd.F()
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", string(sd.K), err)
+ }
+ a := sd.K.String(value)
+ if !a.Valid() {
+ return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit())
+ }
+ return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil
+}
+
+// Detect implements Detector.
+func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(
+ semconv.SchemaURL,
+ semconv.ServiceNameKey,
+ func() (string, error) {
+ executable, err := os.Executable()
+ if err != nil {
+ return "unknown_service:go", nil
+ }
+ return "unknown_service:" + filepath.Base(executable), nil
+ },
+ ).Detect(ctx)
+}
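+
+// A sketch of a one-attribute custom detector built from StringDetector; the
+// environment variable name is hypothetical, and ServiceInstanceIDKey is
+// assumed to be defined by the semconv version imported above:
+//
+//	det := StringDetector(
+//		semconv.SchemaURL,
+//		semconv.ServiceInstanceIDKey,
+//		func() (string, error) { return os.Getenv("MY_INSTANCE_ID"), nil },
+//	)
+//	res, _ := Detect(context.Background(), det)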
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
new file mode 100644
index 0000000000..0d6e213d92
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -0,0 +1,195 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// config contains configuration for Resource creation.
+type config struct {
+ // detectors that will be evaluated.
+ detectors []Detector
+ // SchemaURL to associate with the Resource.
+ schemaURL string
+}
+
+// Option is the interface that applies a configuration option.
+type Option interface {
+ // apply sets the Option value of a config.
+ apply(config) config
+}
+
+// WithAttributes adds attributes to the configured Resource.
+func WithAttributes(attributes ...attribute.KeyValue) Option {
+ return WithDetectors(detectAttributes{attributes})
+}
+
+type detectAttributes struct {
+ attributes []attribute.KeyValue
+}
+
+func (d detectAttributes) Detect(context.Context) (*Resource, error) {
+ return NewSchemaless(d.attributes...), nil
+}
+
+// WithDetectors adds detectors to be evaluated for the configured resource.
+func WithDetectors(detectors ...Detector) Option {
+ return detectorsOption{detectors: detectors}
+}
+
+type detectorsOption struct {
+ detectors []Detector
+}
+
+func (o detectorsOption) apply(cfg config) config {
+ cfg.detectors = append(cfg.detectors, o.detectors...)
+ return cfg
+}
+
+// WithFromEnv adds attributes from environment variables to the configured resource.
+func WithFromEnv() Option {
+ return WithDetectors(fromEnv{})
+}
+
+// WithHost adds attributes from the host to the configured resource.
+func WithHost() Option {
+ return WithDetectors(host{})
+}
+
+// WithHostID adds host ID information to the configured resource.
+func WithHostID() Option {
+ return WithDetectors(hostIDDetector{})
+}
+
+// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
+func WithTelemetrySDK() Option {
+ return WithDetectors(telemetrySDK{})
+}
+
+// WithSchemaURL sets the schema URL for the configured resource.
+func WithSchemaURL(schemaURL string) Option {
+ return schemaURLOption(schemaURL)
+}
+
+type schemaURLOption string
+
+func (o schemaURLOption) apply(cfg config) config {
+ cfg.schemaURL = string(o)
+ return cfg
+}
+
+// WithOS adds all the OS attributes to the configured Resource.
+// See individual WithOS* functions to configure specific attributes.
+func WithOS() Option {
+ return WithDetectors(
+ osTypeDetector{},
+ osDescriptionDetector{},
+ )
+}
+
+// WithOSType adds an attribute with the operating system type to the configured Resource.
+func WithOSType() Option {
+ return WithDetectors(osTypeDetector{})
+}
+
+// WithOSDescription adds an attribute with the operating system description to the
+// configured Resource. The formatted string is equivalent to the output of the
+// `uname -snrvm` command.
+func WithOSDescription() Option {
+ return WithDetectors(osDescriptionDetector{})
+}
+
+// WithProcess adds all the Process attributes to the configured Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information it will be included in the exported resource.
+//
+// This option is equivalent to calling WithProcessPID,
+// WithProcessExecutableName, WithProcessExecutablePath,
+// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
+// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
+// option function for information about what resource attributes each
+// includes.
+func WithProcess() Option {
+ return WithDetectors(
+ processPIDDetector{},
+ processExecutableNameDetector{},
+ processExecutablePathDetector{},
+ processCommandArgsDetector{},
+ processOwnerDetector{},
+ processRuntimeNameDetector{},
+ processRuntimeVersionDetector{},
+ processRuntimeDescriptionDetector{},
+ )
+}
+
+// WithProcessPID adds an attribute with the process identifier (PID) to the
+// configured Resource.
+func WithProcessPID() Option {
+ return WithDetectors(processPIDDetector{})
+}
+
+// WithProcessExecutableName adds an attribute with the name of the process
+// executable to the configured Resource.
+func WithProcessExecutableName() Option {
+ return WithDetectors(processExecutableNameDetector{})
+}
+
+// WithProcessExecutablePath adds an attribute with the full path to the process
+// executable to the configured Resource.
+func WithProcessExecutablePath() Option {
+ return WithDetectors(processExecutablePathDetector{})
+}
+
+// WithProcessCommandArgs adds an attribute with all the command arguments (including
+// the command/executable itself) as received by the process to the configured
+// Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information it will be included in the exported resource.
+func WithProcessCommandArgs() Option {
+ return WithDetectors(processCommandArgsDetector{})
+}
+
+// WithProcessOwner adds an attribute with the username of the user that owns the process
+// to the configured Resource.
+func WithProcessOwner() Option {
+ return WithDetectors(processOwnerDetector{})
+}
+
+// WithProcessRuntimeName adds an attribute with the name of the runtime of this
+// process to the configured Resource.
+func WithProcessRuntimeName() Option {
+ return WithDetectors(processRuntimeNameDetector{})
+}
+
+// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
+// this process to the configured Resource.
+func WithProcessRuntimeVersion() Option {
+ return WithDetectors(processRuntimeVersionDetector{})
+}
+
+// WithProcessRuntimeDescription adds an attribute with an additional description
+// about the runtime of the process to the configured Resource.
+func WithProcessRuntimeDescription() Option {
+ return WithDetectors(processRuntimeDescriptionDetector{})
+}
+
+// WithContainer adds all the Container attributes to the configured Resource.
+// See individual WithContainer* functions to configure specific attributes.
+func WithContainer() Option {
+ return WithDetectors(
+ cgroupContainerIDDetector{},
+ )
+}
+
+// WithContainerID adds an attribute with the id of the container to the configured Resource.
+// Note: WithContainerID will not extract the correct container ID in an ECS environment.
+// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs).
+func WithContainerID() Option {
+ return WithDetectors(cgroupContainerIDDetector{})
+}
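+
+// Example (editor's sketch, not part of the vendored upstream source):
+// combining several of the options above. Assumes a context ctx; note that
+// New may return both a usable Resource and an error (for example on a
+// schema URL conflict):
+//
+//	res, err := New(ctx,
+//		WithFromEnv(),
+//		WithHost(),
+//		WithProcess(),
+//		WithTelemetrySDK(),
+//	)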
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
new file mode 100644
index 0000000000..7525ee75f0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "io"
+ "os"
+ "regexp"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+)
+
+type containerIDProvider func() (string, error)
+
+var (
+ containerID containerIDProvider = getContainerIDFromCGroup
+ cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
+)
+
+type cgroupContainerIDDetector struct{}
+
+const cgroupPath = "/proc/self/cgroup"
+
+// Detect returns a *Resource that describes the id of the container.
+// If no container id is found, an empty resource is returned.
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ containerID, err := containerID()
+ if err != nil {
+ return nil, err
+ }
+
+ if containerID == "" {
+ return Empty(), nil
+ }
+ return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+ defaultOSStat = os.Stat
+ osStat = defaultOSStat
+
+ defaultOSOpen = func(name string) (io.ReadCloser, error) {
+ return os.Open(name)
+ }
+ osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the id of the container from the cgroup file.
+// If no container id is found, an empty string is returned.
+func getContainerIDFromCGroup() (string, error) {
+ if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+ // File does not exist, skip
+ return "", nil
+ }
+
+ file, err := osOpen(cgroupPath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the id of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if id := getContainerIDFromLine(line); id != "" {
+ return id
+ }
+ }
+ return ""
+}
+
+// getContainerIDFromLine returns the id of the container from one string line.
+func getContainerIDFromLine(line string) string {
+ matches := cgroupContainerIDRe.FindStringSubmatch(line)
+ if len(matches) <= 1 {
+ return ""
+ }
+ return matches[1]
+}
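+
+// Example (editor's note, not part of the vendored upstream source): given an
+// illustrative cgroup line such as
+//
+//	13:name=systemd:/docker/0123456789abcdef
+//
+// getContainerIDFromLine returns "0123456789abcdef". For scoped entries like
+// ".../crio-0123456789abcdef.scope", the "crio-" prefix and ".scope" suffix
+// fall outside the capture group of the regular expression above, so only the
+// hex id is returned.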
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 0000000000..64939a2713
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+//
+// While this package provides a stable API,
+// the attributes added by resource detectors may change.
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 0000000000..0d5a355ab9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "strings"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+)
+
+const (
+ // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
+
+ // svcNameKey is the environment variable name that Service Name information will be read from.
+ svcNameKey = "OTEL_SERVICE_NAME"
+)
+
+// errMissingValue is returned when a resource value is missing.
+var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
+
+// fromEnv is a Detector that collects resources from the environment. This
+// Detector is included as a builtin.
+type fromEnv struct{}
+
+// compile-time assertion that fromEnv implements the Detector interface.
+var _ Detector = fromEnv{}
+
+// Detect collects resources from environment.
+func (fromEnv) Detect(context.Context) (*Resource, error) {
+ attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
+ svcName := strings.TrimSpace(os.Getenv(svcNameKey))
+
+ if attrs == "" && svcName == "" {
+ return Empty(), nil
+ }
+
+ var res *Resource
+
+ if svcName != "" {
+ res = NewSchemaless(semconv.ServiceName(svcName))
+ }
+
+ r2, err := constructOTResources(attrs)
+
+ // Ensure that the resource with the service name from OTEL_SERVICE_NAME
+ // takes precedence, if it was defined.
+ res, err2 := Merge(r2, res)
+
+ if err == nil {
+ err = err2
+ } else if err2 != nil {
+ err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+ }
+
+ return res, err
+}
+
+func constructOTResources(s string) (*Resource, error) {
+ if s == "" {
+ return Empty(), nil
+ }
+ pairs := strings.Split(s, ",")
+ var attrs []attribute.KeyValue
+ var invalid []string
+ for _, p := range pairs {
+ k, v, found := strings.Cut(p, "=")
+ if !found {
+ invalid = append(invalid, p)
+ continue
+ }
+ key := strings.TrimSpace(k)
+ val, err := url.PathUnescape(strings.TrimSpace(v))
+ if err != nil {
+ // Retain original value if decoding fails, otherwise it will be
+ // an empty string.
+ val = v
+ otel.Handle(err)
+ }
+ attrs = append(attrs, attribute.String(key, val))
+ }
+ var err error
+ if len(invalid) > 0 {
+ err = fmt.Errorf("%w: %v", errMissingValue, invalid)
+ }
+ return NewSchemaless(attrs...), err
+}
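+
+// Example (editor's note, not part of the vendored upstream source): with the
+// illustrative environment
+//
+//	OTEL_SERVICE_NAME=checkout
+//	OTEL_RESOURCE_ATTRIBUTES=service.namespace=shop,service.version=1.2.3
+//
+// Detect returns a schemaless Resource equivalent to
+//
+//	NewSchemaless(
+//		semconv.ServiceName("checkout"),
+//		attribute.String("service.namespace", "shop"),
+//		attribute.String("service.version", "1.2.3"),
+//	)
+//
+// with OTEL_SERVICE_NAME taking precedence over any service.name listed in
+// OTEL_RESOURCE_ATTRIBUTES. Values may be percent-encoded; they are decoded
+// with url.PathUnescape above.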
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
new file mode 100644
index 0000000000..3c1aa6285b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -0,0 +1,109 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "strings"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+)
+
+type hostIDProvider func() (string, error)
+
+var defaultHostIDProvider hostIDProvider = platformHostIDReader.read
+
+var hostID = defaultHostIDProvider
+
+type hostIDReader interface {
+ read() (string, error)
+}
+
+type fileReader func(string) (string, error)
+
+type commandExecutor func(string, ...string) (string, error)
+
+// hostIDReaderBSD implements hostIDReader.
+type hostIDReaderBSD struct {
+ execCommand commandExecutor
+ readFile fileReader
+}
+
+// read attempts to read the machine-id from /etc/hostid. If not found it will
+// execute `kenv -q smbios.system.uuid`. If neither location yields an id an
+// error will be returned.
+func (r *hostIDReaderBSD) read() (string, error) {
+ if result, err := r.readFile("/etc/hostid"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ return "", errors.New("host id not found in: /etc/hostid or kenv")
+}
+
+// hostIDReaderDarwin implements hostIDReader.
+type hostIDReaderDarwin struct {
+ execCommand commandExecutor
+}
+
+// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id
+// from the IOPlatformUUID line. If the command fails or the uuid cannot be
+// parsed an error will be returned.
+func (r *hostIDReaderDarwin) read() (string, error) {
+ result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
+ if err != nil {
+ return "", err
+ }
+
+ lines := strings.Split(result, "\n")
+ for _, line := range lines {
+ if strings.Contains(line, "IOPlatformUUID") {
+ parts := strings.Split(line, " = ")
+ if len(parts) == 2 {
+ return strings.Trim(parts[1], "\""), nil
+ }
+ break
+ }
+ }
+
+ return "", errors.New("could not parse IOPlatformUUID")
+}
+
+type hostIDReaderLinux struct {
+ readFile fileReader
+}
+
+// read attempts to read the machine-id from /etc/machine-id followed by
+// /var/lib/dbus/machine-id. If neither location yields an ID an error will
+// be returned.
+func (r *hostIDReaderLinux) read() (string, error) {
+ if result, err := r.readFile("/etc/machine-id"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id")
+}
+
+type hostIDDetector struct{}
+
+// Detect returns a *Resource containing the platform specific host id.
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ hostID, err := hostID()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.HostID(hostID),
+ ), nil
+}
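+
+// Example (editor's sketch, not part of the vendored upstream source): this
+// detector is normally reached through the public option. Assumes a context
+// ctx; on Linux the host.id value typically comes from /etc/machine-id:
+//
+//	res, err := New(ctx, WithHostID())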
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
new file mode 100644
index 0000000000..cc8b8938ed
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build dragonfly || freebsd || netbsd || openbsd || solaris
+// +build dragonfly freebsd netbsd openbsd solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderBSD{
+ execCommand: execCommand,
+ readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
new file mode 100644
index 0000000000..b09fde3b73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderDarwin{
+ execCommand: execCommand,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
new file mode 100644
index 0000000000..d9e5d1a8ff
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os/exec"
+
+func execCommand(name string, arg ...string) (string, error) {
+ cmd := exec.Command(name, arg...)
+ b, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
new file mode 100644
index 0000000000..f84f173240
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux
+// +build linux
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderLinux{
+ readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
new file mode 100644
index 0000000000..6354b35602
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os"
+
+func readFile(filename string) (string, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
new file mode 100644
index 0000000000..df12c44c56
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// hostIDReaderUnsupported is a placeholder implementation for operating systems
+// for which this project currently doesn't support host.id
+// attribute detection. See the build tags declared at the top of this file
+// for the list of unsupported OSes.
+type hostIDReaderUnsupported struct{}
+
+func (*hostIDReaderUnsupported) read() (string, error) {
+ return "", nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
new file mode 100644
index 0000000000..71386e2da4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build windows
+// +build windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "golang.org/x/sys/windows/registry"
+)
+
+// hostIDReaderWindows implements hostIDReader.
+type hostIDReaderWindows struct{}
+
+// read reads MachineGuid from the windows registry key:
+// SOFTWARE\Microsoft\Cryptography
+func (*hostIDReaderWindows) read() (string, error) {
+ k, err := registry.OpenKey(
+ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
+ registry.QUERY_VALUE|registry.WOW64_64KEY,
+ )
+
+ if err != nil {
+ return "", err
+ }
+ defer k.Close()
+
+ guid, _, err := k.GetStringValue("MachineGuid")
+ if err != nil {
+ return "", err
+ }
+
+ return guid, nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderWindows{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
new file mode 100644
index 0000000000..ff78020fa1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+)
+
+type osDescriptionProvider func() (string, error)
+
+var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription
+
+var osDescription = defaultOSDescriptionProvider
+
+func setDefaultOSDescriptionProvider() {
+ setOSDescriptionProvider(defaultOSDescriptionProvider)
+}
+
+func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
+ osDescription = osDescriptionProvider
+}
+
+type (
+ osTypeDetector struct{}
+ osDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the operating system type the
+// service is running on.
+func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
+ osType := runtimeOS()
+
+ osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ osTypeAttribute,
+ ), nil
+}
+
+// Detect returns a *Resource that describes the operating system the
+// service is running on.
+func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+ description, err := osDescription()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.OSDescription(description),
+ ), nil
+}
+
+// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime
+// into an OS type attribute with the corresponding value defined by the semantic
+// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase
+// and used as the value for the returned OS type attribute.
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+ // the elements in this map are the intersection between
+ // available GOOS values and defined semconv OS types
+ osTypeAttributeMap := map[string]attribute.KeyValue{
+ "aix": semconv.OSTypeAIX,
+ "darwin": semconv.OSTypeDarwin,
+ "dragonfly": semconv.OSTypeDragonflyBSD,
+ "freebsd": semconv.OSTypeFreeBSD,
+ "linux": semconv.OSTypeLinux,
+ "netbsd": semconv.OSTypeNetBSD,
+ "openbsd": semconv.OSTypeOpenBSD,
+ "solaris": semconv.OSTypeSolaris,
+ "windows": semconv.OSTypeWindows,
+ "zos": semconv.OSTypeZOS,
+ }
+
+ var osTypeAttribute attribute.KeyValue
+
+ if attr, ok := osTypeAttributeMap[osType]; ok {
+ osTypeAttribute = attr
+ } else {
+ osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+ }
+
+ return osTypeAttribute
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 0000000000..ce455dc544
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "os"
+)
+
+type plist struct {
+ XMLName xml.Name `xml:"plist"`
+ Dict dict `xml:"dict"`
+}
+
+type dict struct {
+ Key []string `xml:"key"`
+ String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` commandline program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+ file, err := getPlistFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values, err := parsePlistFile(file)
+ if err != nil {
+ return ""
+ }
+
+ return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{
+ "/System/Library/CoreServices/SystemVersion.plist",
+ "/System/Library/CoreServices/ServerVersion.plist",
+ })
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and
+// returns a map with the key-values for each pair of correlated <key> and
+// <string> elements contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+ var v plist
+
+ err := xml.NewDecoder(file).Decode(&v)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(v.Dict.Key) != len(v.Dict.String) {
+ return nil, fmt.Errorf("the number of and elements doesn't match")
+ }
+
+ properties := make(map[string]string, len(v.Dict.Key))
+ for i, key := range v.Dict.Key {
+ properties[key] = v.Dict.String[i]
+ }
+
+ return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If some of these properties are not found,
+// it returns an empty string.
+func buildOSRelease(properties map[string]string) string {
+ productName := properties["ProductName"]
+ productVersion := properties["ProductVersion"]
+ productBuildVersion := properties["ProductBuildVersion"]
+
+ if productName == "" || productVersion == "" || productBuildVersion == "" {
+ return ""
+ }
+
+ return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
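+
+// Example (editor's note, not part of the vendored upstream source): with
+// illustrative property values,
+//
+//	buildOSRelease(map[string]string{
+//		"ProductName":         "macOS",
+//		"ProductVersion":      "13.0",
+//		"ProductBuildVersion": "22A380",
+//	})
+//
+// returns "macOS 13.0 (22A380)".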
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 0000000000..f537e5ca5c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+ file, err := getOSReleaseFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values := parseOSReleaseFile(file)
+
+ return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release file and
+// returns a map with the key-values contained in it. Empty lines or lines starting
+// with a '#' character are ignored, as well as lines with the missing key=value
+// separator. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+ values := make(map[string]string)
+ scanner := bufio.NewScanner(file)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if skip(line) {
+ continue
+ }
+
+ key, value, ok := parse(line)
+ if ok {
+ values[key] = value
+ }
+ }
+
+ return values
+}
+
+// skip returns true if the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+ line = strings.TrimSpace(line)
+
+ return len(line) == 0 || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) {
+ k, v, found := strings.Cut(line, "=")
+
+ if !found || len(k) == 0 {
+ return "", "", false
+ }
+
+ key := strings.TrimSpace(k)
+ value := unescape(unquote(strings.TrimSpace(v)))
+
+ return key, value, true
+}
+
+// unquote checks whether the string `s` is quoted with double or single quotes
+// and, if so, returns a version of the string without them. Otherwise it returns
+// the provided string unchanged.
+func unquote(s string) string {
+ if len(s) < 2 {
+ return s
+ }
+
+ if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
+ return s[1 : len(s)-1]
+ }
+
+ return s
+}
+
+// unescape removes the `\` prefix from some characters that are expected
+// to have it added in front of them for escaping purposes.
+func unescape(s string) string {
+ return strings.NewReplacer(
+ `\$`, `$`,
+ `\"`, `"`,
+ `\'`, `'`,
+ `\\`, `\`,
+ "\\`", "`",
+ ).Replace(s)
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It favors a combination of the `NAME` and `VERSION`
+// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
+// found), and using `PRETTY_NAME` alone if some of the previous are not present. If
+// none of these properties are found, it returns an empty string.
+//
+// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
+// Linux distributions, it doesn't include the same detail that can be found on the
+// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
+// other properties can produce "pretty" redundant strings in some cases.
+func buildOSRelease(values map[string]string) string {
+ var osRelease string
+
+ name := values["NAME"]
+ version := values["VERSION"]
+
+ if version == "" {
+ version = values["VERSION_ID"]
+ }
+
+ if name != "" && version != "" {
+ osRelease = fmt.Sprintf("%s %s", name, version)
+ } else {
+ osRelease = values["PRETTY_NAME"]
+ }
+
+ return osRelease
+}
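+
+// Example (editor's note, not part of the vendored upstream source): for an
+// illustrative os-release file containing
+//
+//	NAME="Ubuntu"
+//	VERSION="22.04.1 LTS (Jammy Jellyfish)"
+//
+// parseOSReleaseFile yields {"NAME": "Ubuntu", "VERSION": "22.04.1 LTS (Jammy
+// Jellyfish)"} and buildOSRelease returns "Ubuntu 22.04.1 LTS (Jammy Jellyfish)".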
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
new file mode 100644
index 0000000000..a6ff26a4d2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+type unameProvider func(buf *unix.Utsname) (err error)
+
+var defaultUnameProvider unameProvider = unix.Uname
+
+var currentUnameProvider = defaultUnameProvider
+
+func setDefaultUnameProvider() {
+ setUnameProvider(defaultUnameProvider)
+}
+
+func setUnameProvider(unameProvider unameProvider) {
+ currentUnameProvider = unameProvider
+}
+
+// platformOSDescription returns a human readable OS version information string.
+// The final string combines OS release information (where available) and the
+// result of the `uname` system call.
+func platformOSDescription() (string, error) {
+ uname, err := uname()
+ if err != nil {
+ return "", err
+ }
+
+ osRelease := osRelease()
+ if osRelease != "" {
+ return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+ }
+
+ return uname, nil
+}
+
+// uname issues a uname(2) system call (or equivalent on systems that don't
+// have one) and formats the output in a single string, similar to the output
+// of the `uname` commandline program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
+func uname() (string, error) {
+ var utsName unix.Utsname
+
+ err := currentUnameProvider(&utsName)
+ if err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("%s %s %s %s %s",
+ unix.ByteSliceToString(utsName.Sysname[:]),
+ unix.ByteSliceToString(utsName.Nodename[:]),
+ unix.ByteSliceToString(utsName.Release[:]),
+ unix.ByteSliceToString(utsName.Version[:]),
+ unix.ByteSliceToString(utsName.Machine[:]),
+ ), nil
+}
+
+// getFirstAvailableFile returns an *os.File of the first available
+// file from a list of candidate file paths.
+func getFirstAvailableFile(candidates []string) (*os.File, error) {
+ for _, c := range candidates {
+ file, err := os.Open(c)
+ if err == nil {
+ return file, nil
+ }
+ }
+
+ return nil, fmt.Errorf("no candidate file available: %v", candidates)
+}
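+
+// Example (editor's note, not part of the vendored upstream source): on a
+// Unix system the final description produced above has the shape
+//
+//	<os-release> (<sysname> <nodename> <release> <version> <machine>)
+//
+// falling back to the bare uname fields when no os-release information is
+// available.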
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
new file mode 100644
index 0000000000..a77742b077
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// platformOSDescription is a placeholder implementation for OSes
+// for which this project currently doesn't support os.description
+// attribute detection. See the build tags declared at the top of this file
+// for the list of unsupported OSes.
+func platformOSDescription() (string, error) {
+ return "", nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
new file mode 100644
index 0000000000..5e3d199d78
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
@@ -0,0 +1,90 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "strconv"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+// platformOSDescription returns a human readable OS version information string.
+// It does so by querying registry values under the
+// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
+// resembles the one displayed by the Version Reporter Applet (winver.exe).
+func platformOSDescription() (string, error) {
+ k, err := registry.OpenKey(
+ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+
+ if err != nil {
+ return "", err
+ }
+
+ defer k.Close()
+
+ var (
+ productName = readProductName(k)
+ displayVersion = readDisplayVersion(k)
+ releaseID = readReleaseID(k)
+ currentMajorVersionNumber = readCurrentMajorVersionNumber(k)
+ currentMinorVersionNumber = readCurrentMinorVersionNumber(k)
+ currentBuildNumber = readCurrentBuildNumber(k)
+ ubr = readUBR(k)
+ )
+
+ if displayVersion != "" {
+ displayVersion += " "
+ }
+
+ return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]",
+ productName,
+ displayVersion,
+ releaseID,
+ currentMajorVersionNumber,
+ currentMinorVersionNumber,
+ currentBuildNumber,
+ ubr,
+ ), nil
+}
+
+func getStringValue(name string, k registry.Key) string {
+ value, _, _ := k.GetStringValue(name)
+
+ return value
+}
+
+func getIntegerValue(name string, k registry.Key) uint64 {
+ value, _, _ := k.GetIntegerValue(name)
+
+ return value
+}
+
+func readProductName(k registry.Key) string {
+ return getStringValue("ProductName", k)
+}
+
+func readDisplayVersion(k registry.Key) string {
+ return getStringValue("DisplayVersion", k)
+}
+
+func readReleaseID(k registry.Key) string {
+ return getStringValue("ReleaseID", k)
+}
+
+func readCurrentMajorVersionNumber(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10)
+}
+
+func readCurrentMinorVersionNumber(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10)
+}
+
+func readCurrentBuildNumber(k registry.Key) string {
+ return getStringValue("CurrentBuildNumber", k)
+}
+
+func readUBR(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("UBR", k), 10)
+}
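+
+// Example (editor's note, not part of the vendored upstream source): with
+// illustrative registry values the description resembles
+//
+//	Windows 10 Pro 21H2 (2009) [Version 10.0.19044.2130]
+//
+// where "21H2" is DisplayVersion, "2009" is ReleaseID, and the bracketed
+// version combines the major, minor, build, and UBR values read above.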
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
new file mode 100644
index 0000000000..e4e1df8c98
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -0,0 +1,173 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+)
+
+type (
+ pidProvider func() int
+ executablePathProvider func() (string, error)
+ commandArgsProvider func() []string
+ ownerProvider func() (*user.User, error)
+ runtimeNameProvider func() string
+ runtimeVersionProvider func() string
+ runtimeOSProvider func() string
+ runtimeArchProvider func() string
+)
+
+var (
+ defaultPidProvider pidProvider = os.Getpid
+ defaultExecutablePathProvider executablePathProvider = os.Executable
+ defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
+ defaultOwnerProvider ownerProvider = user.Current
+ defaultRuntimeNameProvider runtimeNameProvider = func() string {
+ if runtime.Compiler == "gc" {
+ return "go"
+ }
+ return runtime.Compiler
+ }
+ defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
+ defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
+ defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH }
+)
+
+var (
+ pid = defaultPidProvider
+ executablePath = defaultExecutablePathProvider
+ commandArgs = defaultCommandArgsProvider
+ owner = defaultOwnerProvider
+ runtimeName = defaultRuntimeNameProvider
+ runtimeVersion = defaultRuntimeVersionProvider
+ runtimeOS = defaultRuntimeOSProvider
+ runtimeArch = defaultRuntimeArchProvider
+)
+
+func setDefaultOSProviders() {
+ setOSProviders(
+ defaultPidProvider,
+ defaultExecutablePathProvider,
+ defaultCommandArgsProvider,
+ )
+}
+
+func setOSProviders(
+ pidProvider pidProvider,
+ executablePathProvider executablePathProvider,
+ commandArgsProvider commandArgsProvider,
+) {
+ pid = pidProvider
+ executablePath = executablePathProvider
+ commandArgs = commandArgsProvider
+}
+
+func setDefaultRuntimeProviders() {
+ setRuntimeProviders(
+ defaultRuntimeNameProvider,
+ defaultRuntimeVersionProvider,
+ defaultRuntimeOSProvider,
+ defaultRuntimeArchProvider,
+ )
+}
+
+func setRuntimeProviders(
+ runtimeNameProvider runtimeNameProvider,
+ runtimeVersionProvider runtimeVersionProvider,
+ runtimeOSProvider runtimeOSProvider,
+ runtimeArchProvider runtimeArchProvider,
+) {
+ runtimeName = runtimeNameProvider
+ runtimeVersion = runtimeVersionProvider
+ runtimeOS = runtimeOSProvider
+ runtimeArch = runtimeArchProvider
+}
+
+func setDefaultUserProviders() {
+ setUserProviders(defaultOwnerProvider)
+}
+
+func setUserProviders(ownerProvider ownerProvider) {
+ owner = ownerProvider
+}
+
+type (
+ processPIDDetector struct{}
+ processExecutableNameDetector struct{}
+ processExecutablePathDetector struct{}
+ processCommandArgsDetector struct{}
+ processOwnerDetector struct{}
+ processRuntimeNameDetector struct{}
+ processRuntimeVersionDetector struct{}
+ processRuntimeDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the process identifier (PID) of the
+// executing process.
+func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
+}
+
+// Detect returns a *Resource that describes the name of the process executable.
+func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ executableName := filepath.Base(commandArgs()[0])
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
+}
+
+// Detect returns a *Resource that describes the full path of the process executable.
+func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
+ executablePath, err := executablePath()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil
+}
+
+// Detect returns a *Resource that describes all the command arguments as received
+// by the process.
+func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
+}
+
+// Detect returns a *Resource that describes the username of the user that owns the
+// process.
+func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
+ owner, err := owner()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil
+}
+
+// Detect returns a *Resource that describes the name of the compiler used to compile
+// this process image.
+func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
+}
+
+// Detect returns a *Resource that describes the version of the runtime of this process.
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
+}
+
+// Detect returns a *Resource that describes the runtime of this process.
+func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+ runtimeDescription := fmt.Sprintf(
+ "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ProcessRuntimeDescription(runtimeDescription),
+ ), nil
+}
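+
+// Example (editor's note, not part of the vendored upstream source): with the
+// default providers on a typical toolchain, the runtime detectors above yield
+// attributes resembling
+//
+//	process.runtime.name=go
+//	process.runtime.version=go1.21.0
+//	process.runtime.description=go version go1.21.0 linux/amd64
+//
+// where the version and platform values are illustrative.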
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
new file mode 100644
index 0000000000..9f1af3a236
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -0,0 +1,287 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Resource describes an entity about which identifying information
+// and metadata is exposed. Resource is an immutable object,
+// equivalent to a map from key to unique value.
+//
+// Resources should be passed and stored as pointers
+// (`*resource.Resource`). The `nil` value is equivalent to an empty
+// Resource.
+type Resource struct {
+ attrs attribute.Set
+ schemaURL string
+}
+
+var (
+ defaultResource *Resource
+ defaultResourceOnce sync.Once
+)
+
+// ErrSchemaURLConflict is an error returned when two Resources are merged
+// together that contain different, non-empty, schema URLs.
+var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
+
+// New returns a [Resource] built using opts.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if options that provide a [Detector] are used and that
+// error is returned from one or more of the Detectors. It may also return a
+// merge-conflict Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from the opts results in a
+// schema URL conflict (see [Resource.Merge] for more information). It is up to
+// the caller to determine if this returned Resource should be used or not
+// based on these errors.
+func New(ctx context.Context, opts ...Option) (*Resource, error) {
+ cfg := config{}
+ for _, opt := range opts {
+ cfg = opt.apply(cfg)
+ }
+
+ r := &Resource{schemaURL: cfg.schemaURL}
+ return r, detect(ctx, r, cfg.detectors)
+}
+
+// NewWithAttributes creates a resource from attrs and associates the resource with a
+// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
+// contains any invalid items those items will be dropped. The attrs are assumed to be
+// in a schema identified by schemaURL.
+func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
+ resource := NewSchemaless(attrs...)
+ resource.schemaURL = schemaURL
+ return resource
+}
+
+// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
+// the last value will be used. If attrs contains any invalid items those items will
+// be dropped. The resource will not be associated with a schema URL. If the schema
+// of the attrs is known use NewWithAttributes instead.
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
+ if len(attrs) == 0 {
+ return &Resource{}
+ }
+
+ // Ensure attributes comply with the specification:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute
+ s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool {
+ return kv.Valid()
+ })
+
+ // If attrs only contains invalid entries do not allocate a new resource.
+ if s.Len() == 0 {
+ return &Resource{}
+ }
+
+ return &Resource{attrs: s} //nolint
+}
+
+// String implements the Stringer interface and provides a
+// human-readable form of the resource.
+//
+// Avoid using this representation as the key in a map of resources,
+// use Equivalent() as the key instead.
+func (r *Resource) String() string {
+ if r == nil {
+ return ""
+ }
+ return r.attrs.Encoded(attribute.DefaultEncoder())
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Resource.
+func (r *Resource) MarshalLog() interface{} {
+ return struct {
+ Attributes attribute.Set
+ SchemaURL string
+ }{
+ Attributes: r.attrs,
+ SchemaURL: r.schemaURL,
+ }
+}
+
+// Attributes returns a copy of attributes from the resource in a sorted order.
+// To avoid allocating a new slice, use an iterator.
+func (r *Resource) Attributes() []attribute.KeyValue {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.ToSlice()
+}
+
+// SchemaURL returns the schema URL associated with Resource r.
+func (r *Resource) SchemaURL() string {
+ if r == nil {
+ return ""
+ }
+ return r.schemaURL
+}
+
+// Iter returns an iterator of the Resource attributes.
+// This is ideal to use if you do not want a copy of the attributes.
+func (r *Resource) Iter() attribute.Iterator {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.Iter()
+}
+
+// Equal returns true when a Resource is equivalent to this Resource.
+func (r *Resource) Equal(eq *Resource) bool {
+ if r == nil {
+ r = Empty()
+ }
+ if eq == nil {
+ eq = Empty()
+ }
+ return r.Equivalent() == eq.Equivalent()
+}
+
+// Merge creates a new [Resource] by merging a and b.
+//
+// If there are common keys between a and b, then the value from b will
+// overwrite the value from a, even if b's value is empty.
+//
+// The SchemaURL of the resources will be merged according to the
+// [OpenTelemetry specification rules]:
+//
+// - If a's schema URL is empty then the returned Resource's schema URL will
+// be set to the schema URL of b,
+// - Else if b's schema URL is empty then the returned Resource's schema URL
+// will be set to the schema URL of a,
+// - Else if the schema URLs of a and b are the same then that will be the
+// schema URL of the returned Resource,
+// - Else this is a merging error. If the resources have different,
+// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
+// be returned with the merged Resource. The merged Resource will have an
+// empty schema URL. It may be the case that some unintended attributes
+// have been overwritten or old semantic conventions persisted in the
+// returned Resource. It is up to the caller to determine if this returned
+// Resource should be used or not.
+//
+// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
+func Merge(a, b *Resource) (*Resource, error) {
+ if a == nil && b == nil {
+ return Empty(), nil
+ }
+ if a == nil {
+ return b, nil
+ }
+ if b == nil {
+ return a, nil
+ }
+
+ // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
+ // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
+ mi := attribute.NewMergeIterator(b.Set(), a.Set())
+ combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for mi.Next() {
+ combine = append(combine, mi.Attribute())
+ }
+
+ switch {
+ case a.schemaURL == "":
+ return NewWithAttributes(b.schemaURL, combine...), nil
+ case b.schemaURL == "":
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ case a.schemaURL == b.schemaURL:
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ }
+ // Return the merged resource with an appropriate error. It is up to
+ // the user to decide if the returned resource can be used or not.
+ return NewSchemaless(combine...), fmt.Errorf(
+ "%w: %s and %s",
+ ErrSchemaURLConflict,
+ a.schemaURL,
+ b.schemaURL,
+ )
+}
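+
+// Example (editor's sketch, not part of the vendored upstream source): b's
+// attributes win on conflicting keys, and a non-empty schema URL is carried
+// over when the other resource has none:
+//
+//	a := NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("svc"))
+//	b := NewSchemaless(attribute.String("deployment.environment", "prod"))
+//	merged, err := Merge(a, b) // keeps a's schema URL; err is nil here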
+
+// Empty returns an instance of Resource with no attributes. It is
+// equivalent to a `nil` Resource.
+func Empty() *Resource {
+ return &Resource{}
+}
+
+// Default returns an instance of Resource with a default
+// "service.name" and OpenTelemetrySDK attributes.
+func Default() *Resource {
+ defaultResourceOnce.Do(func() {
+ var err error
+ defaultResource, err = Detect(
+ context.Background(),
+ defaultServiceNameDetector{},
+ fromEnv{},
+ telemetrySDK{},
+ )
+ if err != nil {
+ otel.Handle(err)
+ }
+ // If Detect did not return a valid resource, fall back to emptyResource.
+ if defaultResource == nil {
+ defaultResource = &Resource{}
+ }
+ })
+ return defaultResource
+}
+
+// Environment returns an instance of Resource with attributes
+// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
+func Environment() *Resource {
+ detector := &fromEnv{}
+ resource, err := detector.Detect(context.Background())
+ if err != nil {
+ otel.Handle(err)
+ }
+ return resource
+}
+
+// Equivalent returns an object that can be compared for equality
+// between two resources. This value is suitable for use as a key in
+// a map.
+func (r *Resource) Equivalent() attribute.Distinct {
+ return r.Set().Equivalent()
+}
+
+// Set returns the equivalent *attribute.Set of this resource's attributes.
+func (r *Resource) Set() *attribute.Set {
+ if r == nil {
+ r = Empty()
+ }
+ return &r.attrs
+}
+
+// MarshalJSON encodes the resource attributes as a JSON list of { "Key":
+// "...", "Value": ... } pairs in order sorted by key.
+func (r *Resource) MarshalJSON() ([]byte, error) {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.MarshalJSON()
+}
+
+// Len returns the number of unique key-values in this Resource.
+func (r *Resource) Len() int {
+ if r == nil {
+ return 0
+ }
+ return r.attrs.Len()
+}
+
+// Encoded returns an encoded representation of the resource.
+func (r *Resource) Encoded(enc attribute.Encoder) string {
+ if r == nil {
+ return ""
+ }
+ return r.attrs.Encoded(enc)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
new file mode 100644
index 0000000000..f2936e1439
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
@@ -0,0 +1,3 @@
+# SDK Trace
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
new file mode 100644
index 0000000000..8a89fffdb4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -0,0 +1,409 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/internal/env"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Defaults for BatchSpanProcessorOptions.
+const (
+ DefaultMaxQueueSize = 2048
+ DefaultScheduleDelay = 5000
+ DefaultExportTimeout = 30000
+ DefaultMaxExportBatchSize = 512
+)
+
+// BatchSpanProcessorOption configures a BatchSpanProcessor.
+type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
+
+// BatchSpanProcessorOptions is configuration settings for a
+// BatchSpanProcessor.
+type BatchSpanProcessorOptions struct {
+ // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
+ // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
+ // The default value of MaxQueueSize is 2048.
+ MaxQueueSize int
+
+ // BatchTimeout is the maximum duration for constructing a batch. The
+ // processor forcibly sends available spans when the timeout is reached.
+ // The default value of BatchTimeout is 5000 msec.
+ BatchTimeout time.Duration
+
+ // ExportTimeout specifies the maximum duration for exporting spans. If the timeout
+ // is reached, the export will be cancelled.
+ // The default value of ExportTimeout is 30000 msec.
+ ExportTimeout time.Duration
+
+ // MaxExportBatchSize is the maximum number of spans to process in a single batch.
+ // If there are more than one batch worth of spans then it processes multiple batches
+ // of spans one batch after the other without any delay.
+ // The default value of MaxExportBatchSize is 512.
+ MaxExportBatchSize int
+
+ // BlockOnQueueFull blocks the OnEnd() and OnStart() methods if the queue
+ // is full and BlockOnQueueFull is set to true.
+ // This option should be used carefully, as blocking can severely affect
+ // the performance of an application.
+ BlockOnQueueFull bool
+}
+
+// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
+// spans and sends them to a trace.Exporter when complete.
+type batchSpanProcessor struct {
+ e SpanExporter
+ o BatchSpanProcessorOptions
+
+ queue chan ReadOnlySpan
+ dropped uint32
+
+ batch []ReadOnlySpan
+ batchMutex sync.Mutex
+ timer *time.Timer
+ stopWait sync.WaitGroup
+ stopOnce sync.Once
+ stopCh chan struct{}
+ stopped atomic.Bool
+}
+
+var _ SpanProcessor = (*batchSpanProcessor)(nil)
+
+// NewBatchSpanProcessor creates a new SpanProcessor that will send completed
+// span batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the span processor will perform no action.
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
+ maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
+ maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+ if maxExportBatchSize > maxQueueSize {
+ if DefaultMaxExportBatchSize > maxQueueSize {
+ maxExportBatchSize = maxQueueSize
+ } else {
+ maxExportBatchSize = DefaultMaxExportBatchSize
+ }
+ }
+
+ o := BatchSpanProcessorOptions{
+ BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+ ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+ MaxQueueSize: maxQueueSize,
+ MaxExportBatchSize: maxExportBatchSize,
+ }
+ for _, opt := range options {
+ opt(&o)
+ }
+ bsp := &batchSpanProcessor{
+ e: exporter,
+ o: o,
+ batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
+ timer: time.NewTimer(o.BatchTimeout),
+ queue: make(chan ReadOnlySpan, o.MaxQueueSize),
+ stopCh: make(chan struct{}),
+ }
+
+ bsp.stopWait.Add(1)
+ go func() {
+ defer bsp.stopWait.Done()
+ bsp.processQueue()
+ bsp.drainQueue()
+ }()
+
+ return bsp
+}
+
+// OnStart method does nothing.
+func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+
+// OnEnd method enqueues a ReadOnlySpan for later processing.
+func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
+ // Do not enqueue spans after Shutdown.
+ if bsp.stopped.Load() {
+ return
+ }
+
+ // Do not enqueue spans if we are just going to drop them.
+ if bsp.e == nil {
+ return
+ }
+ bsp.enqueue(s)
+}
+
+// Shutdown flushes the queue and waits until all spans are processed.
+// It only executes once. Subsequent calls do nothing.
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
+ var err error
+ bsp.stopOnce.Do(func() {
+ bsp.stopped.Store(true)
+ wait := make(chan struct{})
+ go func() {
+ close(bsp.stopCh)
+ bsp.stopWait.Wait()
+ if bsp.e != nil {
+ if err := bsp.e.Shutdown(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ close(wait)
+ }()
+ // Wait until the wait group is done or the context is cancelled
+ select {
+ case <-wait:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ })
+ return err
+}
+
+type forceFlushSpan struct {
+ ReadOnlySpan
+ flushed chan struct{}
+}
+
+func (f forceFlushSpan) SpanContext() trace.SpanContext {
+ return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
+}
+
+// ForceFlush exports all ended spans that have not yet been exported.
+func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
+ // Interrupt if context is already canceled.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // Do nothing after Shutdown.
+ if bsp.stopped.Load() {
+ return nil
+ }
+
+ var err error
+ if bsp.e != nil {
+ flushCh := make(chan struct{})
+ if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
+ select {
+ case <-bsp.stopCh:
+ // The batchSpanProcessor is Shutdown.
+ return nil
+ case <-flushCh:
+			// All items enqueued prior to the ForceFlush call have been processed.
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ wait := make(chan error)
+ go func() {
+ wait <- bsp.exportSpans(ctx)
+ close(wait)
+ }()
+ // Wait until the export is finished or the context is cancelled/timed out
+ select {
+ case err = <-wait:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ }
+ return err
+}
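+
+// A minimal usage sketch: ForceFlush is useful in short-lived processes,
+// such as serverless handlers, to push buffered spans before the process is
+// suspended. It honors the context deadline, so a flush can be bounded:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	if err := bsp.ForceFlush(ctx); err != nil {
+//		otel.Handle(err)
+//	}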
+
+// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
+// maximum queue size allowed for a BatchSpanProcessor.
+func WithMaxQueueSize(size int) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.MaxQueueSize = size
+ }
+}
+
+// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
+// the maximum export batch size allowed for a BatchSpanProcessor.
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.MaxExportBatchSize = size
+ }
+}
+
+// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
+// maximum delay allowed for a BatchSpanProcessor before it will export any
+// held span (whether the queue is full or not).
+func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.BatchTimeout = delay
+ }
+}
+
+// WithExportTimeout returns a BatchSpanProcessorOption that configures the
+// amount of time a BatchSpanProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.ExportTimeout = timeout
+ }
+}
+
+// WithBlocking returns a BatchSpanProcessorOption that configures a
+// BatchSpanProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.BlockOnQueueFull = true
+ }
+}
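+
+// A sketch of composing the options above; the values are illustrative, not
+// recommendations, and exp is an assumed SpanExporter:
+//
+//	bsp := NewBatchSpanProcessor(exp,
+//		WithMaxQueueSize(4096),
+//		WithMaxExportBatchSize(512),
+//		WithBatchTimeout(5*time.Second),
+//		WithExportTimeout(30*time.Second),
+//	)
+//
+// WithBlocking is omitted here deliberately: dropping spans under load is
+// usually preferable to blocking the instrumented request path.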
+
+// exportSpans is a subroutine of processQueue and drainQueue that exports
+// the current batch.
+func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
+ bsp.timer.Reset(bsp.o.BatchTimeout)
+
+ bsp.batchMutex.Lock()
+ defer bsp.batchMutex.Unlock()
+
+ if bsp.o.ExportTimeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout)
+ defer cancel()
+ }
+
+ if l := len(bsp.batch); l > 0 {
+ global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+ err := bsp.e.ExportSpans(ctx, bsp.batch)
+
+ // A new batch is always created after exporting, even if the batch failed to be exported.
+ //
+ // It is up to the exporter to implement any type of retry logic if a batch is failing
+ // to be exported, since it is specific to the protocol and backend being sent to.
+ bsp.batch = bsp.batch[:0]
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// processQueue removes spans from the `queue` channel until the processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize,
+// waiting up to BatchTimeout to form a batch.
+func (bsp *batchSpanProcessor) processQueue() {
+ defer bsp.timer.Stop()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ for {
+ select {
+ case <-bsp.stopCh:
+ return
+ case <-bsp.timer.C:
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ case sd := <-bsp.queue:
+ if ffs, ok := sd.(forceFlushSpan); ok {
+ close(ffs.flushed)
+ continue
+ }
+ bsp.batchMutex.Lock()
+ bsp.batch = append(bsp.batch, sd)
+ shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
+ bsp.batchMutex.Unlock()
+ if shouldExport {
+ if !bsp.timer.Stop() {
+ <-bsp.timer.C
+ }
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ }
+ }
+}
+
+// drainQueue awaits any callers that had added to bsp.stopWait
+// to finish enqueueing, then exports the final batch.
+func (bsp *batchSpanProcessor) drainQueue() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ for {
+ select {
+ case sd := <-bsp.queue:
+ if _, ok := sd.(forceFlushSpan); ok {
+ // Ignore flush requests as they are not valid spans.
+ continue
+ }
+
+ bsp.batchMutex.Lock()
+ bsp.batch = append(bsp.batch, sd)
+ shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
+ bsp.batchMutex.Unlock()
+
+ if shouldExport {
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ default:
+ // There are no more enqueued spans. Make final export.
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ return
+ }
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+ ctx := context.TODO()
+ if bsp.o.BlockOnQueueFull {
+ bsp.enqueueBlockOnQueueFull(ctx, sd)
+ } else {
+ bsp.enqueueDrop(ctx, sd)
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
+ if !sd.SpanContext().IsSampled() {
+ return false
+ }
+
+ select {
+ case bsp.queue <- sd:
+ return true
+ case <-ctx.Done():
+ return false
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
+ if !sd.SpanContext().IsSampled() {
+ return false
+ }
+
+ select {
+ case bsp.queue <- sd:
+ return true
+ default:
+ atomic.AddUint32(&bsp.dropped, 1)
+ }
+ return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+ return struct {
+ Type string
+ SpanExporter SpanExporter
+ Config BatchSpanProcessorOptions
+ }{
+ Type: "BatchSpanProcessor",
+ SpanExporter: bsp.e,
+ Config: bsp.o,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
new file mode 100644
index 0000000000..1f60524e3e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package trace contains support for OpenTelemetry distributed tracing.
+
+The following assumes a basic familiarity with OpenTelemetry concepts.
+See https://opentelemetry.io.
+*/
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
new file mode 100644
index 0000000000..60a7ed1349
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Event is a thing that happened during a Span's lifetime.
+type Event struct {
+	// Name is the name of this event.
+ Name string
+
+ // Attributes describe the aspects of the event.
+ Attributes []attribute.KeyValue
+
+ // DroppedAttributeCount is the number of attributes that were not
+ // recorded due to configured limits being reached.
+ DroppedAttributeCount int
+
+ // Time at which this event was recorded.
+ Time time.Time
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
new file mode 100644
index 0000000000..69eb2fdfce
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// evictedQueue is a FIFO queue with a configurable capacity.
+type evictedQueue struct {
+ queue []interface{}
+ capacity int
+ droppedCount int
+}
+
+func newEvictedQueue(capacity int) evictedQueue {
+ // Do not pre-allocate queue, do this lazily.
+ return evictedQueue{capacity: capacity}
+}
+
+// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
+// queued value will be discarded and the drop count incremented.
+func (eq *evictedQueue) add(value interface{}) {
+ if eq.capacity == 0 {
+ eq.droppedCount++
+ return
+ }
+
+ if eq.capacity > 0 && len(eq.queue) == eq.capacity {
+ // Drop first-in while avoiding allocating more capacity to eq.queue.
+ copy(eq.queue[:eq.capacity-1], eq.queue[1:])
+ eq.queue = eq.queue[:eq.capacity-1]
+ eq.droppedCount++
+ }
+ eq.queue = append(eq.queue, value)
+}
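+
+// To make the semantics above concrete: a zero capacity drops every value,
+// a negative capacity grows without bound, and a positive capacity evicts
+// the oldest entry once full.
+//
+//	q := newEvictedQueue(2)
+//	q.add("a")
+//	q.add("b")
+//	q.add("c") // evicts "a"
+//	// q.queue is now ["b", "c"] and q.droppedCount is 1.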
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
new file mode 100644
index 0000000000..f9633d8c57
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ crand "crypto/rand"
+ "encoding/binary"
+ "math/rand"
+ "sync"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+// IDGenerator allows custom generators for TraceID and SpanID.
+type IDGenerator interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // NewIDs returns a new trace and span ID.
+ NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+	// NewSpanID returns an ID for a new span in the trace with traceID.
+ NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type randomIDGenerator struct {
+ sync.Mutex
+ randSource *rand.Rand
+}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+ gen.Lock()
+ defer gen.Unlock()
+ sid := trace.SpanID{}
+ _, _ = gen.randSource.Read(sid[:])
+ return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+ gen.Lock()
+ defer gen.Unlock()
+ tid := trace.TraceID{}
+ _, _ = gen.randSource.Read(tid[:])
+ sid := trace.SpanID{}
+ _, _ = gen.randSource.Read(sid[:])
+ return tid, sid
+}
+
+func defaultIDGenerator() IDGenerator {
+ gen := &randomIDGenerator{}
+ var rngSeed int64
+ _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+ gen.randSource = rand.New(rand.NewSource(rngSeed))
+ return gen
+}
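+
+// A sketch of plugging in a custom generator, assuming customGen is a value
+// implementing IDGenerator (the name is hypothetical); it is wired in with
+// the WithIDGenerator option from provider.go:
+//
+//	tp := NewTracerProvider(WithIDGenerator(customGen))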
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 0000000000..c03bdc90f6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext trace.SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+
+ // DroppedAttributeCount is the number of attributes that were not
+ // recorded due to configured limits being reached.
+ DroppedAttributeCount int
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
new file mode 100644
index 0000000000..dec237ca73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -0,0 +1,493 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+ "go.opentelemetry.io/otel/trace/noop"
+)
+
+const (
+ defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+)
+
+// tracerProviderConfig holds the configuration for a TracerProvider.
+type tracerProviderConfig struct {
+	// processors contains the collection of SpanProcessors that form the
+	// processing pipeline for spans in the trace signal.
+	// SpanProcessors registered with a TracerProvider are called at the start
+	// and end of a Span's lifecycle, in the order they are registered.
+ processors []SpanProcessor
+
+ // sampler is the default sampler used when creating new spans.
+ sampler Sampler
+
+ // idGenerator is used to generate all Span and Trace IDs when needed.
+ idGenerator IDGenerator
+
+ // spanLimits defines the attribute, event, and link limits for spans.
+ spanLimits SpanLimits
+
+ // resource contains attributes representing an entity that produces telemetry.
+ resource *resource.Resource
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Provider.
+func (cfg tracerProviderConfig) MarshalLog() interface{} {
+ return struct {
+ SpanProcessors []SpanProcessor
+ SamplerType string
+ IDGeneratorType string
+ SpanLimits SpanLimits
+ Resource *resource.Resource
+ }{
+ SpanProcessors: cfg.processors,
+ SamplerType: fmt.Sprintf("%T", cfg.sampler),
+ IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
+ SpanLimits: cfg.spanLimits,
+ Resource: cfg.resource,
+ }
+}
+
+// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
+// instrumentation so it can trace operational flow through a system.
+type TracerProvider struct {
+ embedded.TracerProvider
+
+ mu sync.Mutex
+ namedTracer map[instrumentation.Scope]*tracer
+ spanProcessors atomic.Pointer[spanProcessorStates]
+
+ isShutdown atomic.Bool
+
+ // These fields are not protected by the lock mu. They are assumed to be
+ // immutable after creation of the TracerProvider.
+ sampler Sampler
+ idGenerator IDGenerator
+ spanLimits SpanLimits
+ resource *resource.Resource
+}
+
+var _ trace.TracerProvider = &TracerProvider{}
+
+// NewTracerProvider returns a new and configured TracerProvider.
+//
+// By default the returned TracerProvider is configured with:
+// - a ParentBased(AlwaysSample) Sampler
+// - a random number IDGenerator
+// - the resource.Default() Resource
+// - the default SpanLimits.
+//
+// The passed opts are used to override these default values and configure the
+// returned TracerProvider appropriately.
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
+ o := tracerProviderConfig{
+ spanLimits: NewSpanLimits(),
+ }
+ o = applyTracerProviderEnvConfigs(o)
+
+ for _, opt := range opts {
+ o = opt.apply(o)
+ }
+
+ o = ensureValidTracerProviderConfig(o)
+
+ tp := &TracerProvider{
+ namedTracer: make(map[instrumentation.Scope]*tracer),
+ sampler: o.sampler,
+ idGenerator: o.idGenerator,
+ spanLimits: o.spanLimits,
+ resource: o.resource,
+ }
+ global.Info("TracerProvider created", "config", o)
+
+ spss := make(spanProcessorStates, 0, len(o.processors))
+ for _, sp := range o.processors {
+ spss = append(spss, newSpanProcessorState(sp))
+ }
+ tp.spanProcessors.Store(&spss)
+
+ return tp
+}
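+
+// A sketch of a typical production configuration, assuming exp is a
+// SpanExporter and res is a *resource.Resource built by the caller:
+//
+//	tp := NewTracerProvider(
+//		WithBatcher(exp),
+//		WithSampler(ParentBased(TraceIDRatioBased(0.1))),
+//		WithResource(res),
+//	)
+//	defer func() { _ = tp.Shutdown(context.Background()) }()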
+
+// Tracer returns a Tracer with the given name and options. If a Tracer for
+// the given name and options does not exist it is created, otherwise the
+// existing Tracer is returned.
+//
+// If name is empty, defaultTracerName is used instead.
+//
+// This method is safe to be called concurrently.
+func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
+ if p.isShutdown.Load() {
+ return noop.NewTracerProvider().Tracer(name, opts...)
+ }
+ c := trace.NewTracerConfig(opts...)
+ if name == "" {
+ name = defaultTracerName
+ }
+ is := instrumentation.Scope{
+ Name: name,
+ Version: c.InstrumentationVersion(),
+ SchemaURL: c.SchemaURL(),
+ }
+
+ t, ok := func() (trace.Tracer, bool) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
+ // after the first check above but before we acquired the mutex.
+ if p.isShutdown.Load() {
+ return noop.NewTracerProvider().Tracer(name, opts...), true
+ }
+ t, ok := p.namedTracer[is]
+ if !ok {
+ t = &tracer{
+ provider: p,
+ instrumentationScope: is,
+ }
+ p.namedTracer[is] = t
+ }
+ return t, ok
+ }()
+ if !ok {
+ // This code is outside the mutex to not hold the lock while calling third party logging code:
+ // - That code may do slow things like I/O, which would prolong the duration the lock is held,
+ // slowing down all tracing consumers.
+ // - Logging code may be instrumented with tracing and deadlock because it could try
+ // acquiring the same non-reentrant mutex.
+ global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL)
+ }
+ return t
+}
+
+// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
+func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
+ // This check prevents calls during a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+
+ current := p.getSpanProcessors()
+ newSPS := make(spanProcessorStates, 0, len(current)+1)
+ newSPS = append(newSPS, current...)
+ newSPS = append(newSPS, newSpanProcessorState(sp))
+ p.spanProcessors.Store(&newSPS)
+}
+
+// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
+func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
+ // This check prevents calls during a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ old := p.getSpanProcessors()
+ if len(old) == 0 {
+ return
+ }
+ spss := make(spanProcessorStates, len(old))
+ copy(spss, old)
+
+ // stop the span processor if it is started and remove it from the list
+ var stopOnce *spanProcessorState
+ var idx int
+ for i, sps := range spss {
+ if sps.sp == sp {
+ stopOnce = sps
+ idx = i
+ }
+ }
+ if stopOnce != nil {
+ stopOnce.state.Do(func() {
+ if err := sp.Shutdown(context.Background()); err != nil {
+ otel.Handle(err)
+ }
+ })
+ }
+ if len(spss) > 1 {
+ copy(spss[idx:], spss[idx+1:])
+ }
+ spss[len(spss)-1] = nil
+ spss = spss[:len(spss)-1]
+
+ p.spanProcessors.Store(&spss)
+}
+
+// ForceFlush immediately exports all spans that have not yet been exported for
+// all the registered span processors.
+func (p *TracerProvider) ForceFlush(ctx context.Context) error {
+ spss := p.getSpanProcessors()
+ if len(spss) == 0 {
+ return nil
+ }
+
+ for _, sps := range spss {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if err := sps.sp.ForceFlush(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
+// After Shutdown is called, all methods are no-ops.
+func (p *TracerProvider) Shutdown(ctx context.Context) error {
+ // This check prevents deadlocks in case of recursive shutdown.
+ if p.isShutdown.Load() {
+ return nil
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown has already been done concurrently.
+ if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+ return nil
+ }
+
+ var retErr error
+ for _, sps := range p.getSpanProcessors() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var err error
+ sps.state.Do(func() {
+ err = sps.sp.Shutdown(ctx)
+ })
+ if err != nil {
+ if retErr == nil {
+ retErr = err
+ } else {
+ // Poor man's list of errors
+ retErr = fmt.Errorf("%v; %v", retErr, err)
+ }
+ }
+ }
+ p.spanProcessors.Store(&spanProcessorStates{})
+ return retErr
+}
+
+func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
+ return *(p.spanProcessors.Load())
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+ apply(tracerProviderConfig) tracerProviderConfig
+}
+
+type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
+
+func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
+ return fn(cfg)
+}
+
+// WithSyncer registers the exporter with the TracerProvider using a
+// SimpleSpanProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computation resource usage overhead.
+// recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+ return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+ return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.processors = append(cfg.processors, sp)
+ return cfg
+ })
+}
+
+// WithResource returns a TracerProviderOption that will configure the
+// Resource r as a TracerProvider's Resource. The configured Resource is
+// referenced by all the Tracers the TracerProvider creates. It represents the
+// entity producing telemetry.
+//
+// If this option is not used, the TracerProvider will use the
+// resource.Default() Resource by default.
+func WithResource(r *resource.Resource) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ var err error
+ cfg.resource, err = resource.Merge(resource.Environment(), r)
+ if err != nil {
+ otel.Handle(err)
+ }
+ return cfg
+ })
+}
+
+// WithIDGenerator returns a TracerProviderOption that will configure the
+// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator
+// is used by the Tracers the TracerProvider creates to generate new Span and
+// Trace IDs.
+//
+// If this option is not used, the TracerProvider will use a random number
+// IDGenerator by default.
+func WithIDGenerator(g IDGenerator) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ if g != nil {
+ cfg.idGenerator = g
+ }
+ return cfg
+ })
+}
+
+// WithSampler returns a TracerProviderOption that will configure the Sampler
+// s as a TracerProvider's Sampler. The configured Sampler is used by the
+// Tracers the TracerProvider creates to make their sampling decisions for the
+// Spans they create.
+//
+// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER
+// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used
+// and the sampler is not configured through environment variables or the environment
+// contains invalid/unsupported configuration, the TracerProvider will use a
+// ParentBased(AlwaysSample) Sampler by default.
+func WithSampler(s Sampler) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ if s != nil {
+ cfg.sampler = s
+ }
+ return cfg
+ })
+}
+
+// WithSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span
+// created by a Tracer from the TracerProvider.
+//
+// If any field of sl is zero or negative it will be replaced with the default
+// value for that field.
+//
+// If this or WithRawSpanLimits are not provided, the TracerProvider will use
+// the limits defined by environment variables, or the defaults if unset.
+// Refer to the NewSpanLimits documentation for information about this
+// relationship.
+//
+// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited
+// and zero limits. This option will be kept until the next major version
+// incremented release.
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+ if sl.AttributeValueLengthLimit <= 0 {
+ sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
+ }
+ if sl.AttributeCountLimit <= 0 {
+ sl.AttributeCountLimit = DefaultAttributeCountLimit
+ }
+ if sl.EventCountLimit <= 0 {
+ sl.EventCountLimit = DefaultEventCountLimit
+ }
+ if sl.AttributePerEventCountLimit <= 0 {
+ sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
+ }
+ if sl.LinkCountLimit <= 0 {
+ sl.LinkCountLimit = DefaultLinkCountLimit
+ }
+ if sl.AttributePerLinkCountLimit <= 0 {
+ sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
+ }
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.spanLimits = sl
+ return cfg
+ })
+}
+
+// WithRawSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use these limits. These limits bound any Span created by
+// a Tracer from the TracerProvider.
+//
+// The limits will be used as-is. Zero or negative values will not be changed
+// to the default value like WithSpanLimits does. Setting a limit to zero will
+// effectively disable the related resource it limits and setting to a
+// negative value will mean that resource is unlimited. Consequently, this
+// means that the zero-value SpanLimits will disable all span resources.
+// Because of this, limits should be constructed using NewSpanLimits and
+// updated accordingly.
+//
+// If this or WithSpanLimits are not provided, the TracerProvider will use the
+// limits defined by environment variables, or the defaults if unset. Refer to
+// the NewSpanLimits documentation for information about this relationship.
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.spanLimits = limits
+ return cfg
+ })
+}
+
+func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
+ for _, opt := range tracerProviderOptionsFromEnv() {
+ cfg = opt.apply(cfg)
+ }
+
+ return cfg
+}
+
+func tracerProviderOptionsFromEnv() []TracerProviderOption {
+ var opts []TracerProviderOption
+
+ sampler, err := samplerFromEnv()
+ if err != nil {
+ otel.Handle(err)
+ }
+
+ if sampler != nil {
+ opts = append(opts, WithSampler(sampler))
+ }
+
+ return opts
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig is valid.
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
+ if cfg.sampler == nil {
+ cfg.sampler = ParentBased(AlwaysSample())
+ }
+ if cfg.idGenerator == nil {
+ cfg.idGenerator = defaultIDGenerator()
+ }
+ if cfg.resource == nil {
+ cfg.resource = resource.Default()
+ }
+ return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
new file mode 100644
index 0000000000..d2d1f72466
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
@@ -0,0 +1,97 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ tracesSamplerKey = "OTEL_TRACES_SAMPLER"
+ tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
+
+ samplerAlwaysOn = "always_on"
+ samplerAlwaysOff = "always_off"
+ samplerTraceIDRatio = "traceidratio"
+ samplerParentBasedAlwaysOn = "parentbased_always_on"
+	samplerParentBasedAlwaysOff    = "parentbased_always_off"
+ samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
+)
+
+type errUnsupportedSampler string
+
+func (e errUnsupportedSampler) Error() string {
+ return fmt.Sprintf("unsupported sampler: %s", string(e))
+}
+
+var (
+ errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0")
+ errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
+)
+
+type samplerArgParseError struct {
+ parseErr error
+}
+
+func (e samplerArgParseError) Error() string {
+ return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error())
+}
+
+func (e samplerArgParseError) Unwrap() error {
+ return e.parseErr
+}
+
+func samplerFromEnv() (Sampler, error) {
+ sampler, ok := os.LookupEnv(tracesSamplerKey)
+ if !ok {
+ return nil, nil
+ }
+
+ sampler = strings.ToLower(strings.TrimSpace(sampler))
+ samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
+ samplerArg = strings.TrimSpace(samplerArg)
+
+ switch sampler {
+ case samplerAlwaysOn:
+ return AlwaysSample(), nil
+ case samplerAlwaysOff:
+ return NeverSample(), nil
+ case samplerTraceIDRatio:
+ if !hasSamplerArg {
+ return TraceIDRatioBased(1.0), nil
+ }
+ return parseTraceIDRatio(samplerArg)
+ case samplerParentBasedAlwaysOn:
+ return ParentBased(AlwaysSample()), nil
+	case samplerParentBasedAlwaysOff:
+ return ParentBased(NeverSample()), nil
+ case samplerParentBasedTraceIDRatio:
+ if !hasSamplerArg {
+ return ParentBased(TraceIDRatioBased(1.0)), nil
+ }
+ ratio, err := parseTraceIDRatio(samplerArg)
+ return ParentBased(ratio), err
+ default:
+ return nil, errUnsupportedSampler(sampler)
+ }
+}
+
+func parseTraceIDRatio(arg string) (Sampler, error) {
+ v, err := strconv.ParseFloat(arg, 64)
+ if err != nil {
+ return TraceIDRatioBased(1.0), samplerArgParseError{err}
+ }
+ if v < 0.0 {
+ return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
+ }
+ if v > 1.0 {
+ return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
+ }
+
+ return TraceIDRatioBased(v), nil
+}
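+
+// For example, the environment configuration
+//
+//	OTEL_TRACES_SAMPLER=parentbased_traceidratio
+//	OTEL_TRACES_SAMPLER_ARG=0.25
+//
+// yields ParentBased(TraceIDRatioBased(0.25)). If the argument variable is
+// unset, the ratio defaults to 1.0, and an unparseable or out-of-range value
+// falls back to a ratio of 1.0 alongside the returned error.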
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
new file mode 100644
index 0000000000..ebb6df6c90
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -0,0 +1,282 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Sampler decides whether a trace should be sampled and exported.
+type Sampler interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ShouldSample returns a SamplingResult based on a decision made from the
+ // passed parameters.
+ ShouldSample(parameters SamplingParameters) SamplingResult
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Description returns information describing the Sampler.
+ Description() string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// SamplingParameters contains the values passed to a Sampler.
+type SamplingParameters struct {
+ ParentContext context.Context
+ TraceID trace.TraceID
+ Name string
+ Kind trace.SpanKind
+ Attributes []attribute.KeyValue
+ Links []trace.Link
+}
+
+// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
+type SamplingDecision uint8
+
+// Valid sampling decisions.
+const (
+ // Drop will not record the span and all attributes/events will be dropped.
+ Drop SamplingDecision = iota
+
+	// RecordOnly indicates the span's `IsRecording() == true`, but the
+	// `Sampled` flag *must not* be set.
+ RecordOnly
+
+	// RecordAndSample indicates the span's `IsRecording() == true` and the
+	// `Sampled` flag *must* be set.
+ RecordAndSample
+)
+
+// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate.
+type SamplingResult struct {
+ Decision SamplingDecision
+ Attributes []attribute.KeyValue
+ Tracestate trace.TraceState
+}
+
+type traceIDRatioSampler struct {
+ traceIDUpperBound uint64
+ description string
+}
+
+func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ psc := trace.SpanContextFromContext(p.ParentContext)
+ x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
+ if x < ts.traceIDUpperBound {
+ return SamplingResult{
+ Decision: RecordAndSample,
+ Tracestate: psc.TraceState(),
+ }
+ }
+ return SamplingResult{
+ Decision: Drop,
+ Tracestate: psc.TraceState(),
+ }
+}
+
+func (ts traceIDRatioSampler) Description() string {
+ return ts.description
+}
+
+// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will
+// always sample. Fractions < 0 are treated as zero. To respect the
+// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
+// as a delegate of a `Parent` sampler.
+//
+//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
+func TraceIDRatioBased(fraction float64) Sampler {
+ if fraction >= 1 {
+ return AlwaysSample()
+ }
+
+ if fraction <= 0 {
+ fraction = 0
+ }
+
+ return &traceIDRatioSampler{
+ traceIDUpperBound: uint64(fraction * (1 << 63)),
+ description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction),
+ }
+}
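+
+// A worked example of the bound above: for fraction = 0.25,
+// traceIDUpperBound is 0.25 * 2^63 = 2^61. ShouldSample reads bytes 8..15
+// of the trace ID as a big-endian uint64 and shifts right by one, yielding
+// a uniform 63-bit value x; the span is sampled iff x < 2^61, i.e. for
+// roughly a quarter of trace IDs. Since the decision is a pure function of
+// the trace ID, all participants using the same fraction decide alike.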
+
+type alwaysOnSampler struct{}
+
+func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ return SamplingResult{
+ Decision: RecordAndSample,
+ Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+ }
+}
+
+func (as alwaysOnSampler) Description() string {
+ return "AlwaysOnSampler"
+}
+
+// AlwaysSample returns a Sampler that samples every trace.
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
+func AlwaysSample() Sampler {
+ return alwaysOnSampler{}
+}
+
+type alwaysOffSampler struct{}
+
+func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ return SamplingResult{
+ Decision: Drop,
+ Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+ }
+}
+
+func (as alwaysOffSampler) Description() string {
+ return "AlwaysOffSampler"
+}
+
+// NeverSample returns a Sampler that samples no traces.
+func NeverSample() Sampler {
+ return alwaysOffSampler{}
+}
+
+// ParentBased returns a sampler decorator which behaves differently,
+// based on the parent of the span. If the span has no parent,
+// the decorated sampler is used to make the sampling decision. If the span has
+// a parent, depending on whether the parent is remote and whether it
+// is sampled, one of the following samplers will apply:
+// - remoteParentSampled(Sampler) (default: AlwaysOn)
+// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+// - localParentSampled(Sampler) (default: AlwaysOn)
+// - localParentNotSampled(Sampler) (default: AlwaysOff)
+func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
+ return parentBased{
+ root: root,
+ config: configureSamplersForParentBased(samplers),
+ }
+}
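+
+// The defaults honor the parent's decision, so overrides are only needed
+// for unusual policies. A sketch, purely illustrative:
+//
+//	s := ParentBased(
+//		TraceIDRatioBased(0.5),
+//		WithRemoteParentNotSampled(TraceIDRatioBased(0.5)),
+//	)
+//
+// Here root spans and children of unsampled remote parents are both sampled
+// at 50%, while sampled parents are always followed.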
+
+type parentBased struct {
+ root Sampler
+ config samplerConfig
+}
+
+func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
+ c := samplerConfig{
+ remoteParentSampled: AlwaysSample(),
+ remoteParentNotSampled: NeverSample(),
+ localParentSampled: AlwaysSample(),
+ localParentNotSampled: NeverSample(),
+ }
+
+ for _, so := range samplers {
+ c = so.apply(c)
+ }
+
+ return c
+}
+
+// samplerConfig is a group of options for parentBased sampler.
+type samplerConfig struct {
+ remoteParentSampled, remoteParentNotSampled Sampler
+ localParentSampled, localParentNotSampled Sampler
+}
+
+// ParentBasedSamplerOption configures the sampler for a particular sampling case.
+type ParentBasedSamplerOption interface {
+ apply(samplerConfig) samplerConfig
+}
+
+// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
+func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption {
+ return remoteParentSampledOption{s}
+}
+
+type remoteParentSampledOption struct {
+ s Sampler
+}
+
+func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
+ config.remoteParentSampled = o.s
+ return config
+}
+
+// WithRemoteParentNotSampled sets the sampler for the case of remote parent
+// which is not sampled.
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
+ return remoteParentNotSampledOption{s}
+}
+
+type remoteParentNotSampledOption struct {
+ s Sampler
+}
+
+func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+ config.remoteParentNotSampled = o.s
+ return config
+}
+
+// WithLocalParentSampled sets the sampler for the case of sampled local parent.
+func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
+ return localParentSampledOption{s}
+}
+
+type localParentSampledOption struct {
+ s Sampler
+}
+
+func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
+ config.localParentSampled = o.s
+ return config
+}
+
+// WithLocalParentNotSampled sets the sampler for the case of local parent
+// which is not sampled.
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+ return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+ s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+ config.localParentNotSampled = o.s
+ return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+ psc := trace.SpanContextFromContext(p.ParentContext)
+ if psc.IsValid() {
+ if psc.IsRemote() {
+ if psc.IsSampled() {
+ return pb.config.remoteParentSampled.ShouldSample(p)
+ }
+ return pb.config.remoteParentNotSampled.ShouldSample(p)
+ }
+
+ if psc.IsSampled() {
+ return pb.config.localParentSampled.ShouldSample(p)
+ }
+ return pb.config.localParentNotSampled.ShouldSample(p)
+ }
+ return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+ return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+ "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+ pb.root.Description(),
+ pb.config.remoteParentSampled.Description(),
+ pb.config.remoteParentNotSampled.Description(),
+ pb.config.localParentSampled.Description(),
+ pb.config.localParentNotSampled.Description(),
+ )
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 0000000000..554111bb4a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,121 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed Spans to a trace.Exporter immediately.
+type simpleSpanProcessor struct {
+ exporterMu sync.Mutex
+ exporter SpanExporter
+ stopOnce sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or showing
+// examples of other features, but it will be slow and have a high computation
+// resource usage overhead. The BatchSpanProcessor is recommended for production
+// use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+ ssp := &simpleSpanProcessor{
+ exporter: exporter,
+ }
+ global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
+
+ return ssp
+}
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+ ssp.exporterMu.Lock()
+ defer ssp.exporterMu.Unlock()
+
+ if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+ if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+ otel.Handle(err)
+ }
+ }
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+ var err error
+ ssp.stopOnce.Do(func() {
+ stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+ done := make(chan error)
+ return done, func() { done <- exp.Shutdown(ctx) }
+ }
+
+ // The exporter field of the simpleSpanProcessor needs to be zeroed to
+ // signal it is shut down, meaning all subsequent calls to OnEnd will
+ // be gracefully ignored. This needs to be done synchronously to avoid
+ // any race condition.
+ //
+ // A closure is used to keep reference to the exporter and then the
+ // field is zeroed. This ensures the simpleSpanProcessor is shut down
+ // before the exporter. This order is important as it avoids a potential
+ // deadlock. If the exporter shut down operation generates a span, that
+ // span would need to be exported. Meaning, OnEnd would be called and
+ // try acquiring the lock that is held here.
+ ssp.exporterMu.Lock()
+ done, shutdown := stopFunc(ssp.exporter)
+ ssp.exporter = nil
+ ssp.exporterMu.Unlock()
+
+ go shutdown()
+
+ // Wait for the exporter to shut down or the deadline to expire.
+ select {
+ case err = <-done:
+ case <-ctx.Done():
+ // It is possible for the exporter to have immediately shut down and
+ // the context to be done simultaneously. In that case this outer
+ // select statement will randomly choose a case. This will result in
+ // a different returned error for similar scenarios. Instead, double
+ // check if the exporter shut down at the same time and return that
+ // error if so. This will ensure consistency as well as ensure
+ // the caller knows the exporter shut down successfully (they can
+ // already determine if the deadline is expired given they passed
+ // the context).
+ select {
+ case err = <-done:
+ default:
+ err = ctx.Err()
+ }
+ }
+ })
+ return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+ return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent
+// this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Exporter SpanExporter
+ }{
+ Type: "SimpleSpanProcessor",
+ Exporter: ssp.exporter,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 0000000000..32f862790c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+ name string
+ spanContext trace.SpanContext
+ parent trace.SpanContext
+ spanKind trace.SpanKind
+ startTime time.Time
+ endTime time.Time
+ attributes []attribute.KeyValue
+ events []Event
+ links []Link
+ status Status
+ childSpanCount int
+ droppedAttributeCount int
+ droppedEventCount int
+ droppedLinkCount int
+ resource *resource.Resource
+ instrumentationScope instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+ return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+ return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+ return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+ return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+ return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+ return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+ return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+ return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+ return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+ return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+ return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library {
+ return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+ return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+ return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+ return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+ return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+ return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 0000000000..f0221eaa85
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,835 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "runtime"
+ rt "runtime/trace"
+ "slices"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/internal"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+ // Name returns the name of the span.
+ Name() string
+ // SpanContext returns the unique SpanContext that identifies the span.
+ SpanContext() trace.SpanContext
+ // Parent returns the unique SpanContext that identifies the parent of the
+ // span if one exists. If the span has no parent the returned SpanContext
+ // will be invalid.
+ Parent() trace.SpanContext
+ // SpanKind returns the role the span plays in a Trace.
+ SpanKind() trace.SpanKind
+ // StartTime returns the time the span started recording.
+ StartTime() time.Time
+ // EndTime returns the time the span stopped recording. It will be zero if
+ // the span has not ended.
+ EndTime() time.Time
+ // Attributes returns the defining attributes of the span.
+ // The order of the returned attributes is not guaranteed to be stable across invocations.
+ Attributes() []attribute.KeyValue
+ // Links returns all the links the span has to other spans.
+ Links() []Link
+	// Events returns all the events that occurred within the span's
+	// lifetime.
+ Events() []Event
+	// Status returns the span's status.
+ Status() Status
+ // InstrumentationScope returns information about the instrumentation
+ // scope that created the span.
+ InstrumentationScope() instrumentation.Scope
+ // InstrumentationLibrary returns information about the instrumentation
+ // library that created the span.
+ // Deprecated: please use InstrumentationScope instead.
+ InstrumentationLibrary() instrumentation.Library
+ // Resource returns information about the entity that produced the span.
+ Resource() *resource.Resource
+ // DroppedAttributes returns the number of attributes dropped by the span
+ // due to limits being reached.
+ DroppedAttributes() int
+ // DroppedLinks returns the number of links dropped by the span due to
+ // limits being reached.
+ DroppedLinks() int
+ // DroppedEvents returns the number of events dropped by the span due to
+ // limits being reached.
+ DroppedEvents() int
+ // ChildSpanCount returns the count of spans that consider the span a
+ // direct parent.
+ ChildSpanCount() int
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+ trace.Span
+ ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+ embedded.Span
+
+ // mu protects the contents of this span.
+ mu sync.Mutex
+
+ // parent holds the parent span of this span as a trace.SpanContext.
+ parent trace.SpanContext
+
+ // spanKind represents the kind of this span as a trace.SpanKind.
+ spanKind trace.SpanKind
+
+ // name is the name of this span.
+ name string
+
+ // startTime is the time at which this span was started.
+ startTime time.Time
+
+ // endTime is the time at which this span was ended. It contains the zero
+ // value of time.Time until the span is ended.
+ endTime time.Time
+
+ // status is the status of this span.
+ status Status
+
+ // childSpanCount holds the number of child spans created for this span.
+ childSpanCount int
+
+ // spanContext holds the SpanContext of this span.
+ spanContext trace.SpanContext
+
+ // attributes is a collection of user provided key/values. The collection
+ // is constrained by a configurable maximum held by the parent
+	// TracerProvider. When additional attributes are added after this maximum
+	// is reached, they are dropped. The number of dropped attributes is
+	// tracked and reported in the ReadOnlySpan exported when the span ends.
+ attributes []attribute.KeyValue
+ droppedAttributes int
+
+ // events are stored in FIFO queue capped by configured limit.
+ events evictedQueue
+
+ // links are stored in FIFO queue capped by configured limit.
+ links evictedQueue
+
+ // executionTracerTaskEnd ends the execution tracer span.
+ executionTracerTaskEnd func()
+
+ // tracer is the SDK tracer that created this span.
+ tracer *tracer
+}
+
+var (
+ _ ReadWriteSpan = (*recordingSpan)(nil)
+ _ runtimeTracer = (*recordingSpan)(nil)
+)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+ if s == nil {
+ return trace.SpanContext{}
+ }
+ return s.spanContext
+}
+
+// IsRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+func (s *recordingSpan) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return s.endTime.IsZero()
+}
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+ if !s.IsRecording() {
+ return
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.status.Code > code {
+ return
+ }
+
+ status := Status{Code: code}
+ if code == codes.Error {
+ status.Description = description
+ }
+
+ s.status = status
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+ if !s.IsRecording() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ limit := s.tracer.provider.spanLimits.AttributeCountLimit
+ if limit == 0 {
+ // No attributes allowed.
+ s.droppedAttributes += len(attributes)
+ return
+ }
+
+ // If adding these attributes could exceed the capacity of s perform a
+ // de-duplication and truncation while adding to avoid over allocation.
+ if limit > 0 && len(s.attributes)+len(attributes) > limit {
+ s.addOverCapAttrs(limit, attributes)
+ return
+ }
+
+ // Otherwise, add without deduplication. When attributes are read they
+ // will be deduplicated, optimizing the operation.
+ s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes))
+ for _, a := range attributes {
+ if !a.Valid() {
+ // Drop all invalid attributes.
+ s.droppedAttributes++
+ continue
+ }
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+ s.attributes = append(s.attributes, a)
+ }
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+ // In order to not allocate more capacity to s.attributes than needed,
+ // prune and truncate this addition of attributes while adding.
+
+ // Do not set a capacity when creating this map. Benchmark testing has
+ // shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int)
+ s.dedupeAttrsFromRecord(&exists)
+
+ // Now that s.attributes is deduplicated, adding unique attributes up to
+ // the capacity of s will not over-allocate s.attributes.
+ sum := len(attrs) + len(s.attributes)
+ s.attributes = slices.Grow(s.attributes, min(sum, limit))
+ for _, a := range attrs {
+ if !a.Valid() {
+ // Drop all invalid attributes.
+ s.droppedAttributes++
+ continue
+ }
+
+ if idx, ok := exists[a.Key]; ok {
+ // Perform all updates before dropping, even when at capacity.
+ s.attributes[idx] = a
+ continue
+ }
+
+ if len(s.attributes) >= limit {
+ // Do not just drop all of the remaining attributes, make sure
+ // updates are checked and performed.
+ s.droppedAttributes++
+ } else {
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+ s.attributes = append(s.attributes, a)
+ exists[a.Key] = len(s.attributes) - 1
+ }
+ }
+}
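+
+// For illustration of the semantics above: with limit 2 and existing
+// attributes [k1=1, k2=2], the call below updates k2 in place and drops k3,
+// because updates to existing keys are applied even at capacity:
+//
+//	s.SetAttributes(attribute.Int("k2", 3), attribute.Int("k3", 4))
+//	// s.attributes == [k1=1, k2=3], s.droppedAttributes == 1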
+
+// truncateAttr returns a truncated version of attr. Only string and string
+// slice attribute values are truncated. String values are truncated to at
+// most a length of limit. Each string slice value is truncated in this fashion
+// (the slice length itself is unaffected).
+//
+// No truncation is performed for a negative limit.
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
+ if limit < 0 {
+ return attr
+ }
+ switch attr.Value.Type() {
+ case attribute.STRING:
+ if v := attr.Value.AsString(); len(v) > limit {
+ return attr.Key.String(safeTruncate(v, limit))
+ }
+ case attribute.STRINGSLICE:
+ v := attr.Value.AsStringSlice()
+ for i := range v {
+ if len(v[i]) > limit {
+ v[i] = safeTruncate(v[i], limit)
+ }
+ }
+ return attr.Key.StringSlice(v)
+ }
+ return attr
+}
+
+// safeTruncate truncates the string and guarantees valid UTF-8 is returned.
+func safeTruncate(input string, limit int) string {
+ if trunc, ok := safeTruncateValidUTF8(input, limit); ok {
+ return trunc
+ }
+ trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit)
+ return trunc
+}
+
+// safeTruncateValidUTF8 returns a copy of the input string safely truncated to
+// limit. The truncation is ensured to occur at the bounds of complete UTF-8
+// characters. If invalid encoding of UTF-8 is encountered, input is returned
+// with false, otherwise, the truncated input will be returned with true.
+func safeTruncateValidUTF8(input string, limit int) (string, bool) {
+ for cnt := 0; cnt <= limit; {
+ r, size := utf8.DecodeRuneInString(input[cnt:])
+ if r == utf8.RuneError {
+ return input, false
+ }
+
+ if cnt+size > limit {
+ return input[:cnt], true
+ }
+ cnt += size
+ }
+ return input, true
+}
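+
+// As a sketch of the rune-boundary behavior: 'é' encodes to 2 bytes in
+// UTF-8, so truncating "héllo" never splits it:
+//
+//	safeTruncate("héllo", 2) // "h" (cutting at byte 2 would split 'é')
+//	safeTruncate("héllo", 3) // "hé"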
+
+// End ends the span. This method does nothing if the span is already ended or
+// is not being recorded.
+//
+// The only SpanOption currently supported is WithTimestamp which will set the
+// end time for a Span's life-cycle.
+//
+// If this method is called while panicking, an error event is added to the
+// Span before ending it and the panic is continued.
+func (s *recordingSpan) End(options ...trace.SpanEndOption) {
+ // Do not start by checking if the span is being recorded which requires
+ // acquiring a lock. Make a minimal check that the span is not nil.
+ if s == nil {
+ return
+ }
+
+ // Store the end time as soon as possible to avoid artificially increasing
+ // the span's duration in case some operation below takes a while.
+ et := internal.MonotonicEndTime(s.startTime)
+
+ // Do the relatively expensive check now that we have an end time and
+ // see if we need to do any more processing.
+ if !s.IsRecording() {
+ return
+ }
+
+ config := trace.NewSpanEndConfig(options...)
+ if recovered := recover(); recovered != nil {
+ // Record but don't stop the panic.
+ defer panic(recovered)
+ opts := []trace.EventOption{
+ trace.WithAttributes(
+ semconv.ExceptionType(typeStr(recovered)),
+ semconv.ExceptionMessage(fmt.Sprint(recovered)),
+ ),
+ }
+
+ if config.StackTrace() {
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionStacktrace(recordStackTrace()),
+ ))
+ }
+
+ s.addEvent(semconv.ExceptionEventName, opts...)
+ }
+
+ if s.executionTracerTaskEnd != nil {
+ s.executionTracerTaskEnd()
+ }
+
+ s.mu.Lock()
+ // Setting endTime to non-zero marks the span as ended and not recording.
+ if config.Timestamp().IsZero() {
+ s.endTime = et
+ } else {
+ s.endTime = config.Timestamp()
+ }
+ s.mu.Unlock()
+
+ sps := s.tracer.provider.getSpanProcessors()
+ if len(sps) == 0 {
+ return
+ }
+ snap := s.snapshot()
+ for _, sp := range sps {
+ sp.sp.OnEnd(snap)
+ }
+}
+
+// RecordError will record err as a span event for this span. An additional call to
+// SetStatus is required if the Status of the Span should be set to Error, as this
+// method does not change the Span status. If this span is not being recorded or
+// err is nil, then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+ if s == nil || err == nil || !s.IsRecording() {
+ return
+ }
+
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ ))
+
+ c := trace.NewEventConfig(opts...)
+ if c.StackTrace() {
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionStacktrace(recordStackTrace()),
+ ))
+ }
+
+ s.addEvent(semconv.ExceptionEventName, opts...)
+}
+
+func typeStr(i interface{}) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+ stackTrace := make([]byte, 2048)
+ n := runtime.Stack(stackTrace, false)
+
+ return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+ if !s.IsRecording() {
+ return
+ }
+ s.addEvent(name, o...)
+}
+
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+ c := trace.NewEventConfig(o...)
+ e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+ // Discard attributes over limit.
+ limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+ if limit == 0 {
+ // Drop all attributes.
+ e.DroppedAttributeCount = len(e.Attributes)
+ e.Attributes = nil
+ } else if limit > 0 && len(e.Attributes) > limit {
+ // Drop over capacity.
+ e.DroppedAttributeCount = len(e.Attributes) - limit
+ e.Attributes = e.Attributes[:limit]
+ }
+
+ s.mu.Lock()
+ s.events.add(e)
+ s.mu.Unlock()
+}
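+
+// For example, with AttributePerEventCountLimit set to 2, an event recorded
+// with three attributes keeps the first two and reports one as dropped:
+//
+//	s.AddEvent("retry", trace.WithAttributes(
+//		attribute.Int("attempt", 3),
+//		attribute.String("reason", "timeout"),
+//		attribute.Bool("final", false), // dropped; DroppedAttributeCount == 1
+//	))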
+
+// SetName sets the name of this span. If this span is not being recorded,
+// then this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+ if !s.IsRecording() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.dedupeAttrs()
+ return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+ // Do not set a capacity when creating this map. Benchmark testing has
+ // shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int)
+ s.dedupeAttrsFromRecord(&exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
+ // Use the fact that slices share the same backing array.
+ unique := s.attributes[:0]
+ for _, a := range s.attributes {
+ if idx, ok := (*record)[a.Key]; ok {
+ unique[idx] = a
+ } else {
+ unique = append(unique, a)
+ (*record)[a.Key] = len(unique) - 1
+ }
+ }
+ // s.attributes have element types of attribute.KeyValue. These types are
+ // not pointers and they themselves do not contain pointer fields,
+ // therefore the duplicate values do not need to be zeroed for them to be
+ // garbage collected.
+ s.attributes = unique
+}
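+
+// A small sketch of the last-write-wins semantics: attributes recorded as
+// [k1=1, k2=2, k1=3] collapse in place to [k1=3, k2=2], reusing the same
+// backing array:
+//
+//	// before: s.attributes == [k1=1, k2=2, k1=3]
+//	s.dedupeAttrs()
+//	// after:  s.attributes == [k1=3, k2=2]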
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if len(s.links.queue) == 0 {
+ return []Link{}
+ }
+ return s.interfaceArrayToLinksArray()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if len(s.events.queue) == 0 {
+ return []Event{}
+ }
+ return s.interfaceArrayToEventArray()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.status
+}
+
+// InstrumentationScope returns the instrumentation.Scope associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.instrumentationScope
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.instrumentationScope
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.provider.resource
+}
+
+func (s *recordingSpan) AddLink(link trace.Link) {
+ if !s.IsRecording() {
+ return
+ }
+ if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
+ link.SpanContext.TraceState().Len() == 0 {
+ return
+ }
+
+ l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
+
+ // Discard attributes over limit.
+ limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit
+ if limit == 0 {
+ // Drop all attributes.
+ l.DroppedAttributeCount = len(l.Attributes)
+ l.Attributes = nil
+ } else if limit > 0 && len(l.Attributes) > limit {
+ l.DroppedAttributeCount = len(l.Attributes) - limit
+ l.Attributes = l.Attributes[:limit]
+ }
+
+ s.mu.Lock()
+ s.links.add(l)
+ s.mu.Unlock()
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s *recordingSpan) DroppedAttributes() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.droppedAttributes
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s *recordingSpan) DroppedLinks() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.links.droppedCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s *recordingSpan) DroppedEvents() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.events.droppedCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s *recordingSpan) ChildSpanCount() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.childSpanCount
+}
+
+// TracerProvider returns a trace.TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *recordingSpan) TracerProvider() trace.TracerProvider {
+ return s.tracer.provider
+}
+
+// snapshot creates a read-only copy of the current state of the span.
+func (s *recordingSpan) snapshot() ReadOnlySpan {
+ var sd snapshot
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ sd.endTime = s.endTime
+ sd.instrumentationScope = s.tracer.instrumentationScope
+ sd.name = s.name
+ sd.parent = s.parent
+ sd.resource = s.tracer.provider.resource
+ sd.spanContext = s.spanContext
+ sd.spanKind = s.spanKind
+ sd.startTime = s.startTime
+ sd.status = s.status
+ sd.childSpanCount = s.childSpanCount
+
+ if len(s.attributes) > 0 {
+ s.dedupeAttrs()
+ sd.attributes = s.attributes
+ }
+ sd.droppedAttributeCount = s.droppedAttributes
+ if len(s.events.queue) > 0 {
+ sd.events = s.interfaceArrayToEventArray()
+ sd.droppedEventCount = s.events.droppedCount
+ }
+ if len(s.links.queue) > 0 {
+ sd.links = s.interfaceArrayToLinksArray()
+ sd.droppedLinkCount = s.links.droppedCount
+ }
+ return &sd
+}
+
+func (s *recordingSpan) interfaceArrayToLinksArray() []Link {
+ linkArr := make([]Link, 0)
+ for _, value := range s.links.queue {
+ linkArr = append(linkArr, value.(Link))
+ }
+ return linkArr
+}
+
+func (s *recordingSpan) interfaceArrayToEventArray() []Event {
+ eventArr := make([]Event, 0)
+ for _, value := range s.events.queue {
+ eventArr = append(eventArr, value.(Event))
+ }
+ return eventArr
+}
+
+func (s *recordingSpan) addChild() {
+ if !s.IsRecording() {
+ return
+ }
+ s.mu.Lock()
+ s.childSpanCount++
+ s.mu.Unlock()
+}
+
+func (*recordingSpan) private() {}
+
+// runtimeTrace starts a "runtime/trace".Task for the span and returns a
+// context containing the task.
+func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
+ if !rt.IsEnabled() {
+ // Avoid additional overhead if runtime/trace is not enabled.
+ return ctx
+ }
+ nctx, task := rt.NewTask(ctx, s.name)
+
+ s.mu.Lock()
+ s.executionTracerTaskEnd = task.End
+ s.mu.Unlock()
+
+ return nctx
+}
+
+// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API
+// that wraps a SpanContext. It performs no operations other than to return
+// the wrapped SpanContext or TracerProvider that created it.
+type nonRecordingSpan struct {
+ embedded.Span
+
+ // tracer is the SDK tracer that created this span.
+ tracer *tracer
+ sc trace.SpanContext
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+// TracerProvider returns the trace.TracerProvider that provided the Tracer
+// that created this span.
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
+
+func isRecording(s SamplingResult) bool {
+ return s.Decision == RecordOnly || s.Decision == RecordAndSample
+}
+
+func isSampled(s SamplingResult) bool {
+ return s.Decision == RecordAndSample
+}
+
+// Status is the classified state of a Span.
+type Status struct {
+ // Code is an identifier of a Span's state classification.
+ Code codes.Code
+ // Description is a user hint about why that status was set. It is only
+ // applicable when Code is Error.
+ Description string
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 0000000000..6bdda3d94a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ExportSpans exports a batch of spans.
+ //
+ // This function is called synchronously, so there is no concurrency
+ // safety requirement. However, due to the synchronous calling pattern,
+ // it is critical that all timeouts and cancellations contained in the
+ // passed context must be honored.
+ //
+ // Any retry logic must be contained in this function. The SDK that
+ // calls this function will not implement any retry logic. All errors
+ // returned by this function are considered unrecoverable and will be
+ // reported to a configured error Handler.
+ ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown notifies the exporter of a pending halt to operations. The
+ // exporter is expected to perform any cleanup or synchronization it
+ // requires while honoring all timeouts and cancellations contained in
+ // the passed context.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
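+
+// A minimal conforming implementation, as an illustrative sketch only
+// (stdoutExporter is a hypothetical name, and fmt would need importing):
+//
+//	type stdoutExporter struct{}
+//
+//	func (stdoutExporter) ExportSpans(ctx context.Context, spans []ReadOnlySpan) error {
+//		for _, s := range spans {
+//			fmt.Printf("%s %s\n", s.SpanContext().SpanID(), s.Name())
+//		}
+//		return ctx.Err()
+//	}
+//
+//	func (stdoutExporter) Shutdown(context.Context) error { return nil }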
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
new file mode 100644
index 0000000000..bec5e20978
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "go.opentelemetry.io/otel/sdk/internal/env"
+
+const (
+ // DefaultAttributeValueLengthLimit is the default maximum allowed
+ // attribute value length, unlimited.
+ DefaultAttributeValueLengthLimit = -1
+
+ // DefaultAttributeCountLimit is the default maximum number of attributes
+ // a span can have.
+ DefaultAttributeCountLimit = 128
+
+ // DefaultEventCountLimit is the default maximum number of events a span
+ // can have.
+ DefaultEventCountLimit = 128
+
+ // DefaultLinkCountLimit is the default maximum number of links a span can
+ // have.
+ DefaultLinkCountLimit = 128
+
+ // DefaultAttributePerEventCountLimit is the default maximum number of
+ // attributes a span event can have.
+ DefaultAttributePerEventCountLimit = 128
+
+ // DefaultAttributePerLinkCountLimit is the default maximum number of
+ // attributes a span link can have.
+ DefaultAttributePerLinkCountLimit = 128
+)
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+ // AttributeValueLengthLimit is the maximum allowed attribute value length.
+ //
+ // This limit only applies to string and string slice attribute values.
+ // Any string longer than this value will be truncated to this length.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributeValueLengthLimit int
+
+ // AttributeCountLimit is the maximum allowed span attribute count. Any
+ // attribute added to a span once this limit is reached will be dropped.
+ //
+ // Setting this to zero means no attributes will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributeCountLimit int
+
+ // EventCountLimit is the maximum allowed span event count. Any event
+ // added to a span once this limit is reached will still be added, but
+ // the oldest event will be dropped.
+ //
+ // Setting this to zero means no events will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ EventCountLimit int
+
+ // LinkCountLimit is the maximum allowed span link count. Any link added
+ // to a span once this limit is reached will still be added, but the
+ // oldest link will be dropped.
+ //
+ // Setting this to zero means no links will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ LinkCountLimit int
+
+ // AttributePerEventCountLimit is the maximum number of attributes allowed
+ // per span event. Any attribute added after this limit is reached will be
+ // dropped.
+ //
+ // Setting this to zero means no attributes will be recorded for events.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributePerEventCountLimit int
+
+ // AttributePerLinkCountLimit is the maximum number of attributes allowed
+ // per span link. Any attribute added after this limit is reached will be
+ // dropped.
+ //
+ // Setting this to zero means no attributes will be recorded for links.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+ return SpanLimits{
+ AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+ AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit),
+ EventCountLimit: env.SpanEventCount(DefaultEventCountLimit),
+ LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit),
+ AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+ AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+ }
+}
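+
+// For example, to cap attribute values at 256 bytes while keeping the other
+// defaults (a sketch; the limits take effect when passed to the provider via
+// the WithRawSpanLimits option):
+//
+//	limits := NewSpanLimits()
+//	limits.AttributeValueLengthLimit = 256
+//	tp := NewTracerProvider(WithRawSpanLimits(limits))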
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 0000000000..af7f9177fc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors are registered with a TracerProvider and are called at the
+// start and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // OnStart is called when a span is started. It is called synchronously
+ // and should not block.
+ OnStart(parent context.Context, s ReadWriteSpan)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // OnEnd is called when a span is finished. It is called synchronously
+ // and hence should not block.
+ OnEnd(s ReadOnlySpan)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown is called when the SDK shuts down. Any cleanup or release of
+ // resources held by the processor should be done in this call.
+ //
+ // Calls to OnStart, OnEnd, or ForceFlush after this has been called
+ // should be ignored.
+ //
+ // All timeouts and cancellations contained in ctx must be honored, this
+ // should not block indefinitely.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ForceFlush exports all ended spans that have not yet been exported to
+ // the configured Exporter. It should only be called when absolutely
+ // necessary, such as when using a FaaS provider that may suspend the
+ // process after an invocation, but before the Processor can export the
+ // completed spans.
+ ForceFlush(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type spanProcessorState struct {
+ sp SpanProcessor
+ state sync.Once
+}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+ return &spanProcessorState{sp: sp}
+}
+
+type spanProcessorStates []*spanProcessorState
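+
+// A minimal processor that counts ended spans, as an illustrative sketch
+// (countingProcessor is a hypothetical name; it assumes sync/atomic):
+//
+//	type countingProcessor struct{ ended atomic.Int64 }
+//
+//	func (*countingProcessor) OnStart(context.Context, ReadWriteSpan) {}
+//	func (p *countingProcessor) OnEnd(ReadOnlySpan)                   { p.ended.Add(1) }
+//	func (*countingProcessor) Shutdown(context.Context) error         { return nil }
+//	func (*countingProcessor) ForceFlush(context.Context) error       { return nil }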
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 0000000000..3668b1387d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+type tracer struct {
+ embedded.Tracer
+
+ provider *TracerProvider
+ instrumentationScope instrumentation.Scope
+}
+
+var _ trace.Tracer = &tracer{}
+
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
+ config := trace.NewSpanStartConfig(options...)
+
+ if ctx == nil {
+ // Prevent trace.ContextWithSpan from panicking.
+ ctx = context.Background()
+ }
+
+ // For local spans created by this SDK, track child span count.
+ if p := trace.SpanFromContext(ctx); p != nil {
+ if sdkSpan, ok := p.(*recordingSpan); ok {
+ sdkSpan.addChild()
+ }
+ }
+
+ s := tr.newSpan(ctx, name, &config)
+ if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+ sps := tr.provider.getSpanProcessors()
+ for _, sp := range sps {
+ sp.sp.OnStart(ctx, rw)
+ }
+ }
+ if rtt, ok := s.(runtimeTracer); ok {
+ ctx = rtt.runtimeTrace(ctx)
+ }
+
+ return trace.ContextWithSpan(ctx, s), s
+}
+
+type runtimeTracer interface {
+ // runtimeTrace starts a "runtime/trace".Task for the span and
+ // returns a context containing the task.
+ runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+ // If told explicitly to make this a new root, use a zero value
+ // SpanContext as a parent, which contains an invalid trace ID and is
+ // not remote.
+ var psc trace.SpanContext
+ if config.NewRoot() {
+ ctx = trace.ContextWithSpanContext(ctx, psc)
+ } else {
+ psc = trace.SpanContextFromContext(ctx)
+ }
+
+ // If there is a valid parent trace ID, use it to ensure the continuity of
+ // the trace. Always generate a new span ID so other components can rely
+ // on a unique span ID, even if the Span is non-recording.
+ var tid trace.TraceID
+ var sid trace.SpanID
+ if !psc.TraceID().IsValid() {
+ tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+ } else {
+ tid = psc.TraceID()
+ sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+ }
+
+ samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+ ParentContext: ctx,
+ TraceID: tid,
+ Name: name,
+ Kind: config.SpanKind(),
+ Attributes: config.Attributes(),
+ Links: config.Links(),
+ })
+
+ scc := trace.SpanContextConfig{
+ TraceID: tid,
+ SpanID: sid,
+ TraceState: samplingResult.Tracestate,
+ }
+ if isSampled(samplingResult) {
+ scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+ } else {
+ scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+ }
+ sc := trace.NewSpanContext(scc)
+
+ if !isRecording(samplingResult) {
+ return tr.newNonRecordingSpan(sc)
+ }
+ return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
+ startTime := config.Timestamp()
+ if startTime.IsZero() {
+ startTime = time.Now()
+ }
+
+ s := &recordingSpan{
+ // Do not pre-allocate the attributes slice here! Doing so will
+ // allocate memory that is likely never going to be used, or if used,
+ // will be over-sized. The default Go compiler has been tested to
+ // dynamically allocate needed space very well. Benchmarking has shown
+ // it to be more performant than what we can predetermine here,
+ // especially for the common use case of few to no added
+ // attributes.
+
+ parent: psc,
+ spanContext: sc,
+ spanKind: trace.ValidateSpanKind(config.SpanKind()),
+ name: name,
+ startTime: startTime,
+ events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit),
+ links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit),
+ tracer: tr,
+ }
+
+ for _, l := range config.Links() {
+ s.AddLink(l)
+ }
+
+ s.SetAttributes(sr.Attributes...)
+ s.SetAttributes(config.Attributes()...)
+
+ return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+ return nonRecordingSpan{tracer: tr, sc: sc}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md
new file mode 100644
index 0000000000..0678d6564f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/README.md
@@ -0,0 +1,3 @@
+# SDK Trace test
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace/tracetest)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace/tracetest)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
new file mode 100644
index 0000000000..07117495a8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
@@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package tracetest is a testing helper package for the SDK. Users can
+// configure no-op or in-memory exporters to verify different SDK behaviors or
+// custom instrumentation.
+package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/sdk/trace"
+)
+
+var _ trace.SpanExporter = (*NoopExporter)(nil)
+
+// NewNoopExporter returns a new no-op exporter.
+func NewNoopExporter() *NoopExporter {
+ return new(NoopExporter)
+}
+
+// NoopExporter is an exporter that drops all received spans and performs no
+// action.
+type NoopExporter struct{}
+
+// ExportSpans handles export of spans by dropping them.
+func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil }
+
+// Shutdown stops the exporter by doing nothing.
+func (nsb *NoopExporter) Shutdown(context.Context) error { return nil }
+
+var _ trace.SpanExporter = (*InMemoryExporter)(nil)
+
+// NewInMemoryExporter returns a new InMemoryExporter.
+func NewInMemoryExporter() *InMemoryExporter {
+ return new(InMemoryExporter)
+}
+
+// InMemoryExporter is an exporter that stores all received spans in-memory.
+type InMemoryExporter struct {
+ mu sync.Mutex
+ ss SpanStubs
+}
+
+// ExportSpans handles export of spans by storing them in memory.
+func (imsb *InMemoryExporter) ExportSpans(_ context.Context, spans []trace.ReadOnlySpan) error {
+ imsb.mu.Lock()
+ defer imsb.mu.Unlock()
+ imsb.ss = append(imsb.ss, SpanStubsFromReadOnlySpans(spans)...)
+ return nil
+}
+
+// Shutdown stops the exporter by clearing spans held in memory.
+func (imsb *InMemoryExporter) Shutdown(context.Context) error {
+ imsb.Reset()
+ return nil
+}
+
+// Reset the current in-memory storage.
+func (imsb *InMemoryExporter) Reset() {
+ imsb.mu.Lock()
+ defer imsb.mu.Unlock()
+ imsb.ss = nil
+}
+
+// GetSpans returns the current in-memory stored spans.
+func (imsb *InMemoryExporter) GetSpans() SpanStubs {
+ imsb.mu.Lock()
+ defer imsb.mu.Unlock()
+ ret := make(SpanStubs, len(imsb.ss))
+ copy(ret, imsb.ss)
+ return ret
+}
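+
+// Typical usage pairs the exporter with a synchronous processor so spans are
+// available as soon as they end (a sketch using the SDK's WithSyncer option):
+//
+//	exp := NewInMemoryExporter()
+//	tp := trace.NewTracerProvider(trace.WithSyncer(exp))
+//	_, span := tp.Tracer("test").Start(context.Background(), "op")
+//	span.End()
+//	stubs := exp.GetSpans() // len(stubs) == 1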
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
new file mode 100644
index 0000000000..7aababbbf2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest"
+
+import (
+ "context"
+ "sync"
+
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// SpanRecorder records started and ended spans.
+type SpanRecorder struct {
+ startedMu sync.RWMutex
+ started []sdktrace.ReadWriteSpan
+
+ endedMu sync.RWMutex
+ ended []sdktrace.ReadOnlySpan
+}
+
+var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil)
+
+// NewSpanRecorder returns a new initialized SpanRecorder.
+func NewSpanRecorder() *SpanRecorder {
+ return new(SpanRecorder)
+}
+
+// OnStart records started spans.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
+ sr.startedMu.Lock()
+ defer sr.startedMu.Unlock()
+ sr.started = append(sr.started, s)
+}
+
+// OnEnd records completed spans.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) {
+ sr.endedMu.Lock()
+ defer sr.endedMu.Unlock()
+ sr.ended = append(sr.ended, s)
+}
+
+// Shutdown does nothing.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) Shutdown(context.Context) error {
+ return nil
+}
+
+// ForceFlush does nothing.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) ForceFlush(context.Context) error {
+ return nil
+}
+
+// Started returns a copy of all started spans that have been recorded.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan {
+ sr.startedMu.RLock()
+ defer sr.startedMu.RUnlock()
+ dst := make([]sdktrace.ReadWriteSpan, len(sr.started))
+ copy(dst, sr.started)
+ return dst
+}
+
+// Ended returns a copy of all ended spans that have been recorded.
+//
+// This method is safe to be called concurrently.
+func (sr *SpanRecorder) Ended() []sdktrace.ReadOnlySpan {
+ sr.endedMu.RLock()
+ defer sr.endedMu.RUnlock()
+ dst := make([]sdktrace.ReadOnlySpan, len(sr.ended))
+ copy(dst, sr.ended)
+ return dst
+}
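+
+// A typical test registers the recorder with a provider and asserts on the
+// spans it captured (a sketch):
+//
+//	sr := NewSpanRecorder()
+//	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
+//	_, span := tp.Tracer("test").Start(context.Background(), "op")
+//	span.End()
+//	ended := sr.Ended() // one ReadOnlySpan for "op"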
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
new file mode 100644
index 0000000000..0a641f9488
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
@@ -0,0 +1,157 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SpanStubs is a slice of SpanStub used for testing an SDK.
+type SpanStubs []SpanStub
+
+// SpanStubsFromReadOnlySpans returns SpanStubs populated from ro.
+func SpanStubsFromReadOnlySpans(ro []tracesdk.ReadOnlySpan) SpanStubs {
+ if len(ro) == 0 {
+ return nil
+ }
+
+ s := make(SpanStubs, 0, len(ro))
+ for _, r := range ro {
+ s = append(s, SpanStubFromReadOnlySpan(r))
+ }
+
+ return s
+}
+
+// Snapshots returns s as a slice of ReadOnlySpans.
+func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan {
+ if len(s) == 0 {
+ return nil
+ }
+
+ ro := make([]tracesdk.ReadOnlySpan, len(s))
+ for i := 0; i < len(s); i++ {
+ ro[i] = s[i].Snapshot()
+ }
+ return ro
+}
+
+// SpanStub is a stand-in for a Span.
+type SpanStub struct {
+ Name string
+ SpanContext trace.SpanContext
+ Parent trace.SpanContext
+ SpanKind trace.SpanKind
+ StartTime time.Time
+ EndTime time.Time
+ Attributes []attribute.KeyValue
+ Events []tracesdk.Event
+ Links []tracesdk.Link
+ Status tracesdk.Status
+ DroppedAttributes int
+ DroppedEvents int
+ DroppedLinks int
+ ChildSpanCount int
+ Resource *resource.Resource
+ InstrumentationLibrary instrumentation.Library
+}
+
+// SpanStubFromReadOnlySpan returns a SpanStub populated from ro.
+func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub {
+ if ro == nil {
+ return SpanStub{}
+ }
+
+ return SpanStub{
+ Name: ro.Name(),
+ SpanContext: ro.SpanContext(),
+ Parent: ro.Parent(),
+ SpanKind: ro.SpanKind(),
+ StartTime: ro.StartTime(),
+ EndTime: ro.EndTime(),
+ Attributes: ro.Attributes(),
+ Events: ro.Events(),
+ Links: ro.Links(),
+ Status: ro.Status(),
+ DroppedAttributes: ro.DroppedAttributes(),
+ DroppedEvents: ro.DroppedEvents(),
+ DroppedLinks: ro.DroppedLinks(),
+ ChildSpanCount: ro.ChildSpanCount(),
+ Resource: ro.Resource(),
+ InstrumentationLibrary: ro.InstrumentationScope(),
+ }
+}
+
+// Snapshot returns a read-only copy of the SpanStub.
+func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan {
+ return spanSnapshot{
+ name: s.Name,
+ spanContext: s.SpanContext,
+ parent: s.Parent,
+ spanKind: s.SpanKind,
+ startTime: s.StartTime,
+ endTime: s.EndTime,
+ attributes: s.Attributes,
+ events: s.Events,
+ links: s.Links,
+ status: s.Status,
+ droppedAttributes: s.DroppedAttributes,
+ droppedEvents: s.DroppedEvents,
+ droppedLinks: s.DroppedLinks,
+ childSpanCount: s.ChildSpanCount,
+ resource: s.Resource,
+ instrumentationScope: s.InstrumentationLibrary,
+ }
+}
+
+type spanSnapshot struct {
+ // Embed the interface to implement the private method.
+ tracesdk.ReadOnlySpan
+
+ name string
+ spanContext trace.SpanContext
+ parent trace.SpanContext
+ spanKind trace.SpanKind
+ startTime time.Time
+ endTime time.Time
+ attributes []attribute.KeyValue
+ events []tracesdk.Event
+ links []tracesdk.Link
+ status tracesdk.Status
+ droppedAttributes int
+ droppedEvents int
+ droppedLinks int
+ childSpanCount int
+ resource *resource.Resource
+ instrumentationScope instrumentation.Scope
+}
+
+func (s spanSnapshot) Name() string { return s.name }
+func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext }
+func (s spanSnapshot) Parent() trace.SpanContext { return s.parent }
+func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind }
+func (s spanSnapshot) StartTime() time.Time { return s.startTime }
+func (s spanSnapshot) EndTime() time.Time { return s.endTime }
+func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes }
+func (s spanSnapshot) Links() []tracesdk.Link { return s.links }
+func (s spanSnapshot) Events() []tracesdk.Event { return s.events }
+func (s spanSnapshot) Status() tracesdk.Status { return s.status }
+func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes }
+func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks }
+func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents }
+func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount }
+func (s spanSnapshot) Resource() *resource.Resource { return s.resource }
+func (s spanSnapshot) InstrumentationScope() instrumentation.Scope {
+ return s.instrumentationScope
+}
+
+func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library {
+ return s.instrumentationScope
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
new file mode 100644
index 0000000000..b84dd2c5ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// version is the current release version of the trace SDK in use.
+func version() string {
+ return "1.16.0-rc.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
new file mode 100644
index 0000000000..f0d8fc51a2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk // import "go.opentelemetry.io/otel/sdk"
+
+// Version is the current release version of the OpenTelemetry SDK in use.
+func Version() string {
+ return "1.27.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
new file mode 100644
index 0000000000..ada857995d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
@@ -0,0 +1,327 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/semconv/internal"
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// SemanticConventions are the semantic convention values defined for a
+// version of the OpenTelemetry specification.
+type SemanticConventions struct {
+ EnduserIDKey attribute.Key
+ HTTPClientIPKey attribute.Key
+ HTTPFlavorKey attribute.Key
+ HTTPHostKey attribute.Key
+ HTTPMethodKey attribute.Key
+ HTTPRequestContentLengthKey attribute.Key
+ HTTPRouteKey attribute.Key
+ HTTPSchemeHTTP attribute.KeyValue
+ HTTPSchemeHTTPS attribute.KeyValue
+ HTTPServerNameKey attribute.Key
+ HTTPStatusCodeKey attribute.Key
+ HTTPTargetKey attribute.Key
+ HTTPURLKey attribute.Key
+ HTTPUserAgentKey attribute.Key
+ NetHostIPKey attribute.Key
+ NetHostNameKey attribute.Key
+ NetHostPortKey attribute.Key
+ NetPeerIPKey attribute.Key
+ NetPeerNameKey attribute.Key
+ NetPeerPortKey attribute.Key
+ NetTransportIP attribute.KeyValue
+ NetTransportOther attribute.KeyValue
+ NetTransportTCP attribute.KeyValue
+ NetTransportUDP attribute.KeyValue
+ NetTransportUnix attribute.KeyValue
+}
+
+// NetAttributesFromHTTPRequest generates attributes of the net
+// namespace as specified by the OpenTelemetry specification for a
+// span. The network parameter is a string that the net.Dial function
+// from the standard library can understand.
+func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{}
+
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ attrs = append(attrs, sc.NetTransportTCP)
+ case "udp", "udp4", "udp6":
+ attrs = append(attrs, sc.NetTransportUDP)
+ case "ip", "ip4", "ip6":
+ attrs = append(attrs, sc.NetTransportIP)
+ case "unix", "unixgram", "unixpacket":
+ attrs = append(attrs, sc.NetTransportUnix)
+ default:
+ attrs = append(attrs, sc.NetTransportOther)
+ }
+
+ peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr)
+ if peerIP != "" {
+ attrs = append(attrs, sc.NetPeerIPKey.String(peerIP))
+ }
+ if peerName != "" {
+ attrs = append(attrs, sc.NetPeerNameKey.String(peerName))
+ }
+ if peerPort != 0 {
+ attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort))
+ }
+
+ hostIP, hostName, hostPort := "", "", 0
+ for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} {
+ hostIP, hostName, hostPort = hostIPNamePort(someHost)
+ if hostIP != "" || hostName != "" || hostPort != 0 {
+ break
+ }
+ }
+ if hostIP != "" {
+ attrs = append(attrs, sc.NetHostIPKey.String(hostIP))
+ }
+ if hostName != "" {
+ attrs = append(attrs, sc.NetHostNameKey.String(hostName))
+ }
+ if hostPort != 0 {
+ attrs = append(attrs, sc.NetHostPortKey.Int(hostPort))
+ }
+
+ return attrs
+}
+
+// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort.
+// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized
+// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the
+// host portion will instead be returned in `name`.
+func hostIPNamePort(hostWithPort string) (ip string, name string, port int) {
+ var (
+ hostPart, portPart string
+ parsedPort uint64
+ err error
+ )
+ if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil {
+ hostPart, portPart = hostWithPort, ""
+ }
+ if parsedIP := net.ParseIP(hostPart); parsedIP != nil {
+ ip = parsedIP.String()
+ } else {
+ name = hostPart
+ }
+ if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil {
+ port = int(parsedPort)
+ }
+ return
+}
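+
+// For example:
+//
+//	hostIPNamePort("192.0.2.1:8080") // ip "192.0.2.1", name "", port 8080
+//	hostIPNamePort("example.com")    // ip "", name "example.com", port 0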
+
+// EndUserAttributesFromHTTPRequest generates attributes of the
+// enduser namespace as specified by the OpenTelemetry specification
+// for a span.
+func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+ if username, _, ok := request.BasicAuth(); ok {
+ return []attribute.KeyValue{sc.EnduserIDKey.String(username)}
+ }
+ return nil
+}
+
+// HTTPClientAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the client side.
+func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{}
+
+ // remove any username/password info that may be in the URL
+ // before adding it to the attributes
+ userinfo := request.URL.User
+ request.URL.User = nil
+
+ attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String()))
+
+ // restore any username/password info that was removed
+ request.URL.User = userinfo
+
+ return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...)
+}
+
+func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{}
+ if ua := request.UserAgent(); ua != "" {
+ attrs = append(attrs, sc.HTTPUserAgentKey.String(ua))
+ }
+ if request.ContentLength > 0 {
+ attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength))
+ }
+
+ return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...)
+}
+
+func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+ // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality
+ attrs := []attribute.KeyValue{}
+
+ if request.TLS != nil {
+ attrs = append(attrs, sc.HTTPSchemeHTTPS)
+ } else {
+ attrs = append(attrs, sc.HTTPSchemeHTTP)
+ }
+
+ if request.Host != "" {
+ attrs = append(attrs, sc.HTTPHostKey.String(request.Host))
+ } else if request.URL != nil && request.URL.Host != "" {
+ attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host))
+ }
+
+ flavor := ""
+ if request.ProtoMajor == 1 {
+ flavor = fmt.Sprintf("1.%d", request.ProtoMinor)
+ } else if request.ProtoMajor == 2 {
+ flavor = "2"
+ }
+ if flavor != "" {
+ attrs = append(attrs, sc.HTTPFlavorKey.String(flavor))
+ }
+
+ if request.Method != "" {
+ attrs = append(attrs, sc.HTTPMethodKey.String(request.Method))
+ } else {
+ attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet))
+ }
+
+ return attrs
+}
+
+// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
+// to be used with server-side HTTP metrics.
+func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{}
+ if serverName != "" {
+ attrs = append(attrs, sc.HTTPServerNameKey.String(serverName))
+ }
+ return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...)
+}
+
+// HTTPServerAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the server side. Currently, only basic authentication is
+// supported.
+func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{
+ sc.HTTPTargetKey.String(request.RequestURI),
+ }
+
+ if serverName != "" {
+ attrs = append(attrs, sc.HTTPServerNameKey.String(serverName))
+ }
+ if route != "" {
+ attrs = append(attrs, sc.HTTPRouteKey.String(route))
+ }
+ if values := request.Header["X-Forwarded-For"]; len(values) > 0 {
+ addr := values[0]
+ if i := strings.Index(addr, ","); i > 0 {
+ addr = addr[:i]
+ }
+ attrs = append(attrs, sc.HTTPClientIPKey.String(addr))
+ }
+
+ return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...)
+}
+
+// HTTPAttributesFromHTTPStatusCode generates attributes of the http
+// namespace as specified by the OpenTelemetry specification for a
+// span.
+func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{
+ sc.HTTPStatusCodeKey.Int(code),
+ }
+ return attrs
+}
+
+type codeRange struct {
+ fromInclusive int
+ toInclusive int
+}
+
+func (r codeRange) contains(code int) bool {
+ return r.fromInclusive <= code && code <= r.toInclusive
+}
+
+var validRangesPerCategory = map[int][]codeRange{
+ 1: {
+ {http.StatusContinue, http.StatusEarlyHints},
+ },
+ 2: {
+ {http.StatusOK, http.StatusAlreadyReported},
+ {http.StatusIMUsed, http.StatusIMUsed},
+ },
+ 3: {
+ {http.StatusMultipleChoices, http.StatusUseProxy},
+ {http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
+ },
+ 4: {
+ {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
+ {http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
+ {http.StatusPreconditionRequired, http.StatusTooManyRequests},
+ {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
+ {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
+ },
+ 5: {
+ {http.StatusInternalServerError, http.StatusLoopDetected},
+ {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
+ },
+}
+
+// SpanStatusFromHTTPStatusCode generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
+ spanCode, valid := validateHTTPStatusCode(code)
+ if !valid {
+ return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ return spanCode, ""
+}
+
+// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+// Exclude 4xx for SERVER to set the appropriate status.
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
+ spanCode, valid := validateHTTPStatusCode(code)
+ if !valid {
+ return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ category := code / 100
+ if spanKind == trace.SpanKindServer && category == 4 {
+ return codes.Unset, ""
+ }
+ return spanCode, ""
+}
+
+// validateHTTPStatusCode validates the HTTP status code and returns the
+// corresponding span status code. If the `code` is not a valid HTTP status
+// code, returns span status Error and false.
+func validateHTTPStatusCode(code int) (codes.Code, bool) {
+ category := code / 100
+ ranges, ok := validRangesPerCategory[category]
+ if !ok {
+ return codes.Error, false
+ }
+ ok = false
+ for _, crange := range ranges {
+ ok = crange.contains(code)
+ if ok {
+ break
+ }
+ }
+ if !ok {
+ return codes.Error, false
+ }
+ if category > 0 && category < 4 {
+ return codes.Unset, true
+ }
+ return codes.Error, true
+}
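+
+// For example, a 404 is classified as an error for a client span but left
+// unset for a server span:
+//
+//	SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindClient) // codes.Error, ""
+//	SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindServer) // codes.Unset, ""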
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/README.md
new file mode 100644
index 0000000000..c692442c37
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.10.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.10.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.10.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go
new file mode 100644
index 0000000000..60e7be59f0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.10.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go
new file mode 100644
index 0000000000..3c042d4f99
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go
new file mode 100644
index 0000000000..f083085868
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+import (
+ "net/http"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/semconv/internal"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
+
+var sc = &internal.SemanticConventions{
+ EnduserIDKey: EnduserIDKey,
+ HTTPClientIPKey: HTTPClientIPKey,
+ HTTPFlavorKey: HTTPFlavorKey,
+ HTTPHostKey: HTTPHostKey,
+ HTTPMethodKey: HTTPMethodKey,
+ HTTPRequestContentLengthKey: HTTPRequestContentLengthKey,
+ HTTPRouteKey: HTTPRouteKey,
+ HTTPSchemeHTTP: HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: HTTPSchemeHTTPS,
+ HTTPServerNameKey: HTTPServerNameKey,
+ HTTPStatusCodeKey: HTTPStatusCodeKey,
+ HTTPTargetKey: HTTPTargetKey,
+ HTTPURLKey: HTTPURLKey,
+ HTTPUserAgentKey: HTTPUserAgentKey,
+ NetHostIPKey: NetHostIPKey,
+ NetHostNameKey: NetHostNameKey,
+ NetHostPortKey: NetHostPortKey,
+ NetPeerIPKey: NetPeerIPKey,
+ NetPeerNameKey: NetPeerNameKey,
+ NetPeerPortKey: NetPeerPortKey,
+ NetTransportIP: NetTransportIP,
+ NetTransportOther: NetTransportOther,
+ NetTransportTCP: NetTransportTCP,
+ NetTransportUDP: NetTransportUDP,
+ NetTransportUnix: NetTransportUnix,
+}
+
+// NetAttributesFromHTTPRequest generates attributes of the net
+// namespace as specified by the OpenTelemetry specification for a
+// span. The network parameter is a string that the net.Dial function
+// from the standard library can understand.
+func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
+ return sc.NetAttributesFromHTTPRequest(network, request)
+}
+
+// EndUserAttributesFromHTTPRequest generates attributes of the
+// enduser namespace as specified by the OpenTelemetry specification
+// for a span.
+func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+ return sc.EndUserAttributesFromHTTPRequest(request)
+}
+
+// HTTPClientAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the client side.
+func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+ return sc.HTTPClientAttributesFromHTTPRequest(request)
+}
+
+// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
+// to be used with server-side HTTP metrics.
+func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
+ return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request)
+}
+
+// HTTPServerAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the server side. Currently, only basic authentication is
+// supported.
+func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
+ return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request)
+}
+
+// HTTPAttributesFromHTTPStatusCode generates attributes of the http
+// namespace as specified by the OpenTelemetry specification for a
+// span.
+func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
+ return sc.HTTPAttributesFromHTTPStatusCode(code)
+}
+
+// SpanStatusFromHTTPStatusCode generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
+ return internal.SpanStatusFromHTTPStatusCode(code)
+}
+
+// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+// Exclude 4xx for SERVER to set the appropriate status.
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
+ return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind)
+}
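+
+// Illustrative sketch (not part of the upstream file): how the helpers above
+// might be wired into a net/http handler. The server name "frontend" and the
+// route "/items/:id" are hypothetical values the instrumentation would know.
+//
+//    func handle(w http.ResponseWriter, r *http.Request) {
+//        span := trace.SpanFromContext(r.Context())
+//        span.SetAttributes(HTTPServerAttributesFromHTTPRequest("frontend", "/items/:id", r)...)
+//        span.SetAttributes(NetAttributesFromHTTPRequest("tcp", r)...)
+//        // After handling, map the HTTP status code onto the span status.
+//        code, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(http.StatusOK, trace.SpanKindServer)
+//        span.SetStatus(code, msg)
+//    }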
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go
new file mode 100644
index 0000000000..27c52f4b16
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go
@@ -0,0 +1,970 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // Name of the cloud provider.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+ // The cloud account ID the resource is assigned to.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+ // The geographical region the resource is running in.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-
+ // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-
+ // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-
+ // us/global-infrastructure/geographies/), [Google Cloud
+ // regions](https://cloud.google.com/about/locations), or [Tencent Cloud
+ // regions](https://intl.cloud.tencent.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+ // Cloud regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the resource
+ // is running.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+ // The cloud platform in use.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
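+
+// Illustrative sketch (not part of the upstream file): the enum values above
+// combine with the cloud.* keys to describe where a workload runs, e.g. for a
+// hypothetical service on Google Cloud Run:
+//
+//    attrs := []attribute.KeyValue{
+//        CloudProviderGCP,
+//        CloudPlatformGCPCloudRun,
+//        CloudRegionKey.String("us-central1"),
+//    }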
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.
+ // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-
+ // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+ // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo
+ // perguide/clusters.html).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+ // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l
+ // aunch_types.html) for an ECS task.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+ // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates
+ // t/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-
+ // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+ // The task definition family this task definition is a member of.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+ // The revision for this task definition.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // The ARN of an EKS cluster.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// Resources specific to Amazon Web Services.
+const (
+ // The name(s) of the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like multi-container
+ // applications, where a single application has sidecar containers, and each writes
+ // to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+ // The Amazon Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+ // access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+ // The name(s) of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+ // The ARN(s) of the AWS log stream(s).
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-
+ // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+ // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain
+ // several log streams, so these ARNs necessarily identify both a log group and a
+ // log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// A container instance.
+const (
+ // Container name used by container runtime.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+ // Container ID. Usually a UUID, as for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-
+ // identification). The UUID might be abbreviated.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+ // The container runtime managing this container.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+ // Name of the image the container was built on.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+ // Container image tag.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// The software deployment.
+const (
+ // Name of the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// The device on which the process represented by this resource is running.
+const (
+ // A unique identifier representing the device.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values outlined
+ // below. This value is not an advertising identifier and MUST NOT be used as
+ // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id
+ // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden
+ // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the
+ // Firebase Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on best
+ // practices and exact implementation details. Caution should be taken when
+ // storing personal data or anything which can identify a user. GDPR and data
+ // protection laws may apply, ensure you do your own due diligence.
+ DeviceIDKey = attribute.Key("device.id")
+ // The model identifier for the device.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version of the
+ // model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+ // The marketing name for the device model.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of the
+ // device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+ // The name of the device manufacturer.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// A serverless instance.
+const (
+ // The name of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback function (which
+ // may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
+ // general.md#source-code-attributes) span attributes).
+ FaaSNameKey = attribute.Key("faas.name")
+ // The unique ID of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: Depending on the cloud provider, use:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
+ // namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+ // aliases.html) with the resolved function version, as the same runtime instance
+ // may be invokable with multiple
+ // different aliases.
+ // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
+ // resource-names)
+ // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
+ // us/rest/api/resources/resources/get-by-id).
+ //
+ // On some providers, it may not be possible to determine the full ID at startup,
+ // which is why this field cannot be made required. For example, on AWS the
+ // account ID
+ // part of the ARN is not available without calling another AWS API
+ // which may be deemed too slow for a short-running lambda function.
+ // As an alternative, consider setting `faas.id` as a span attribute instead.
+ FaaSIDKey = attribute.Key("faas.id")
+ // The immutable version of the function being executed.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+ // versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-
+ // var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+ // The execution environment ID as a string, which will potentially be reused for
+ // other invocations of the same function/function version.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+ // The amount of memory available to the serverless function in MiB.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 128
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information.
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// A host is defined as a general computing instance.
+const (
+ // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
+ // provider.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostIDKey = attribute.Key("host.id")
+ // Name of the host. On Unix systems, it may contain what the hostname command
+ // returns, or the fully qualified hostname, or another name specified by the
+ // user.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+ // Type of host. For Cloud, this must be the machine type.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+ // The CPU architecture the host system is running on.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+ // Name of the VM image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+ // VM image ID. For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+ // The version string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// A Kubernetes Cluster.
+const (
+ // The name of the cluster.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// A Kubernetes Node object.
+const (
+ // The name of the Node.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+ // The UID of the Node.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// A Kubernetes Namespace.
+const (
+ // The name of the namespace that the pod is running in.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// A Kubernetes Pod object.
+const (
+ // The UID of the Pod.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+ // The name of the Pod.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // The name of the Container from the Pod specification; it must be unique within
+ // a Pod. The container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+ // Number of times the container was restarted. This attribute can be used to
+ // identify a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// A Kubernetes ReplicaSet object.
+const (
+ // The UID of the ReplicaSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+ // The name of the ReplicaSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// A Kubernetes Deployment object.
+const (
+ // The UID of the Deployment.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+ // The name of the Deployment.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// A Kubernetes StatefulSet object.
+const (
+ // The UID of the StatefulSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+ // The name of the StatefulSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// A Kubernetes DaemonSet object.
+const (
+ // The UID of the DaemonSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+ // The name of the DaemonSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// A Kubernetes Job object.
+const (
+ // The UID of the Job.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+ // The name of the Job.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// A Kubernetes CronJob object.
+const (
+ // The UID of the CronJob.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+ // The name of the CronJob.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// The operating system (OS) on which the process represented by this resource is running.
+const (
+ // The operating system type.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+ // Human readable (not intended to be parsed) OS version information, as reported
+ // by, for example, the `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+ // Human readable operating system name.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+ // The version string of the operating system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// An operating system process.
+const (
+ // Process identifier (PID).
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+ // The name of the process executable. On Linux-based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // Required: See below
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+ // The full path to the process executable. On Linux-based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // Required: See below
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+ // The command used to launch the process (i.e. the command name). On Linux-based
+ // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows,
+ // can be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // Required: See below
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+ // The full command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not
+ // set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // Required: See below
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+ // All the command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited strings
+ // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be
+ // the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // Required: See below
+ // Stability: stable
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+ // The username of the user that owns the process.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// The single (language) runtime instance which is monitored.
+const (
+ // The name of the runtime of this process. For compiled native binaries, this
+ // SHOULD be the name of the compiler.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+ // The version of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+ // An additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// A service instance.
+const (
+ // Logical name of the service.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fall back to `unknown_service:`
+ // concatenated with [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available, the
+ // value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+ // A namespace for `service.name`.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace defined
+ // (so the empty/unspecified namespace is simply one more valid namespace). Zero-
+ // length namespace string is assumed equal to unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+ // The string ID of the service instance.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to distinguish instances of the same service that exist
+ // at the same time (e.g. instances of a horizontally scaled service). It is
+ // preferable for the ID to be persistent and stay the same for the lifetime of
+ // the service instance, however it is acceptable that the ID is ephemeral and
+ // changes during important lifetime events for the service (e.g. service
+ // restarts). If the service has no inherent unique ID that can be used as the
+ // value of this attribute it is recommended to generate a random Version 1 or
+ // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+ // The version string of the service API or implementation.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
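+
+// Illustrative sketch (not part of the upstream file): the service.* keys are
+// typically set once on the SDK resource, assuming the
+// go.opentelemetry.io/otel/sdk/resource package is imported as resource:
+//
+//    res := resource.NewWithAttributes(
+//        SchemaURL,
+//        ServiceNameKey.String("shoppingcart"),
+//        ServiceVersionKey.String("2.0.0"),
+//        ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
+//    )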
+
+// The telemetry SDK used to capture data recorded by the instrumentation libraries.
+const (
+ // The name of the telemetry SDK as defined above.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+ // The language of the telemetry SDK.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+ // The version string of the telemetry SDK.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+ // The version string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime.
+const (
+ // The name of the web engine.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+ // The version of the web engine.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+ // Additional description of the web engine (e.g. detailed version and edition
+ // information).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go
new file mode 100644
index 0000000000..9eebb78ced
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare a
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.10.0"
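+
+// Illustrative sketch (not part of the upstream file): SchemaURL is intended
+// to be passed along wherever telemetry that follows this package's
+// conventions is declared, for example when obtaining a tracer (assuming the
+// go.opentelemetry.io/otel and go.opentelemetry.io/otel/trace packages):
+//
+//    tracer := otel.Tracer(
+//        "example.com/mypkg", // hypothetical instrumentation name
+//        trace.WithSchemaURL(semconv.SchemaURL),
+//    )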
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go
new file mode 100644
index 0000000000..001d5cbf30
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go
@@ -0,0 +1,1689 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
+const (
+ // The full invoked ARN as provided on the `Context` passed to the function
+ // (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// This document defines attributes for CloudEvents. CloudEvents is a specification for defining event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used.
+const (
+ // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec
+ // .md#id) uniquely identifies the event.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+ // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m
+ // d#source-1) identifies the context in which an event happened.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my-
+ // service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+ // The [version of the CloudEvents specification](https://github.com/cloudevents/s
+ // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+ // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp
+ // ec.md#type) contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+ // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.
+ // md#subject) of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// This document defines semantic conventions for the OpenTracing Shim
+const (
+ // Parent-child Reference type
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// This document defines the attributes used to perform database client calls.
+const (
+ // An identifier for the database management system (DBMS) product being used. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+ // The connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+ // Username for accessing the database.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+ // The fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+ // used to connect.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+ // This attribute is used to report the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target database
+ // (even if the command fails).
+ //
+ // Type: string
+ // Required: Required, if applicable.
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called "schema
+ // name". In case there are multiple layers that could be considered for database
+ // name (e.g. Oracle instance name and schema name), the database name to be used
+ // is the more specific layer (e.g. Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+ // The database statement being executed.
+ //
+ // Type: string
+ // Required: Required if applicable and not explicitly disabled via
+ // instrumentation configuration.
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+ // The name of the operation being executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // Required: Required, if `db.statement` is not applicable.
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to attempt any
+ // client-side parsing of `db.statement` just to get this property, but it should
+ // be set if the operation name is provided by the library being instrumented. If
+ // the SQL statement has an ambiguous operation, or performs more than one
+ // operation, this value may be omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+)
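+
+// Illustrative sketch (not part of the upstream file): annotating a database
+// client span with the call-level keys above; span is an assumed active
+// trace.Span and the values are hypothetical.
+//
+//    span.SetAttributes(
+//        DBSystemPostgreSQL,
+//        DBNameKey.String("customers"),
+//        DBStatementKey.String("SELECT * FROM wuser_table"),
+//        DBOperationKey.String("SELECT"),
+//    )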
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-
+ // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // being connected to. This name is used to determine the port of a named instance.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// Call-level attributes for Cassandra
+const (
+ // The fetch size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+ // The consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-
+ // oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+ // The name of the primary table that the operation is acting upon, including the
+ // keyspace name (if applicable).
+ //
+ // Type: string
+ // Required: Recommended if available.
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references Cassandra rather
+ // than SQL. It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting upon an
+ // anonymous table, or more than one table, this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+ // Whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+ // The number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+ // The ID of the coordinating node for a query.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+ // The data center of the coordinating node for a query.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Call-level attributes for Redis
+const (
+ // The index of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To be used
+ // instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // Required: Required, if other than the default database (`0`).
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// Call-level attributes for MongoDB
+const (
+ // The collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// Call-level attributes for SQL databases
+const (
+ // The name of the primary table that the operation is acting upon, including the
+ // database name (if applicable).
+ //
+ // Type: string
+ // Required: Recommended if available.
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting upon an
+ // anonymous table, or more than one table, this value MUST NOT be set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// This document defines the attributes used to report a single exception associated with a span.
+const (
+ // The type of the exception (its fully-qualified class name, if applicable). The
+ // dynamic type of the exception should be preferred over the static type in
+ // languages that support it.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+ // The exception message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+ // A stacktrace as a string in the natural representation for the language
+ // runtime. The representation is to be determined and documented by each language
+ // SIG.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+ // SHOULD be set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of a span,
+ // if that span is ended while the exception is still logically "in flight".
+ // This may be actually "in flight" in some languages (e.g. if the exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most languages.
+ //
+ // It is usually not possible to determine at the point where an exception is
+ // thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
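+
+// Illustrative sketch (not part of the upstream file): recording an exception
+// as a span event, combining ExceptionEventName (defined in exception.go)
+// with the keys above; span is an assumed active trace.Span and err the
+// caught error.
+//
+//    span.AddEvent(ExceptionEventName, trace.WithAttributes(
+//        ExceptionTypeKey.String("*os.PathError"),
+//        ExceptionMessageKey.String(err.Error()),
+//        ExceptionEscapedKey.Bool(true),
+//    ))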
+
+// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
+const (
+ // Type of the trigger which caused this function execution.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that the corresponding incoming span would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+ // The execution ID of the current function execution.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
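+
+// Illustrative sketch (not part of the upstream file): an incoming FaaS
+// invocation span might carry the trigger and execution ID like so (span is
+// an assumed active trace.Span; the execution ID value is hypothetical):
+//
+//    span.SetAttributes(
+//        FaaSTriggerHTTP,
+//        FaaSExecutionKey.String("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//    )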
+
+// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
+const (
+ // The name of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+ // Describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+ // A string containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+ // in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+ // The document name/table subjected to the operation. For example, in Cloud
+ // Storage or S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // A string containing the function invocation time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+ // in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+ // A string containing the schedule period as a [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
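+
+// A minimal illustrative sketch (not part of the generated conventions):
+// `faas.time` expects ISO 8601 in UTC, which Go's time.RFC3339 layout produces
+// for a UTC timestamp. The `span` variable is hypothetical.
+//
+//	span.SetAttributes(
+//		FaaSTimeKey.String(time.Now().UTC().Format(time.RFC3339)),
+//		FaaSCronKey.String("0/5 * * * ? *"),
+//	)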
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // A boolean that is true if the serverless function is executed for the first
+ // time (aka cold-start).
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // The name of the invoked function.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+ // The cloud provider of the invoked function.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked
+ // function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+ // The cloud region of the invoked function.
+ //
+ // Type: string
+ // Required: For some cloud providers, like AWS or GCP, the region in which a
+ // function is hosted is essential to uniquely identify the function and also part
+ // of its endpoint. Since it's part of the endpoint being called, the region is
+ // always known to clients. In these cases, `faas.invoked_region` MUST be set
+ // accordingly. If the region is unknown to the client or not required for
+ // identifying the invoked function, setting `faas.invoked_region` is optional.
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// These attributes may be used for any network related operation.
+const (
+ // Transport protocol used. See note below.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+ // Remote address of the peer (dotted decimal for IPv4 or
+ // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6)
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '127.0.0.1'
+ NetPeerIPKey = attribute.Key("net.peer.ip")
+ // Remote port number.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+ // Remote hostname or similar, see note below.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'example.com'
+ NetPeerNameKey = attribute.Key("net.peer.name")
+ // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetHostIPKey = attribute.Key("net.host.ip")
+ // Like `net.peer.port` but for the host port.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 35555
+ NetHostPortKey = attribute.Key("net.host.port")
+ // Local hostname or similar, see note below.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+ // The internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+ // This describes more details regarding the connection type. It may be the type
+ // of cell technology connection, but it could also be used to describe details
+ // about a Wi-Fi connection.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+ // The name of the mobile carrier.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+ // The mobile carrier country code.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+ // The mobile carrier network code.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+ // The ISO 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Another IP-based protocol
+ NetTransportIP = NetTransportKey.String("ip")
+ // Unix Domain socket. See below
+ NetTransportUnix = NetTransportKey.String("unix")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
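+
+// A minimal illustrative sketch (not part of the generated conventions):
+// describing a TCP connection from a mobile peer on an LTE network. The `span`
+// variable is hypothetical.
+//
+//	span.SetAttributes(
+//		NetTransportTCP,
+//		NetPeerIPKey.String("127.0.0.1"),
+//		NetPeerPortKey.Int(443),
+//		NetHostConnectionTypeCell,
+//		NetHostConnectionSubtypeLte,
+//	)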
+
+// Operations that access some remote service.
+const (
+ // The [`service.name`](../../resource/semantic_conventions/README.md#service) of
+ // the remote service. SHOULD be equal to the actual `service.name` resource
+ // attribute of the remote service if any.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// These attributes may be used for any operation with an authenticated and/or authorized enduser.
+const (
+ // Username or client_id extracted from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the
+ // inbound request from outside the system.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+ // Actual/assumed role the client is making the request under extracted from token
+ // or application security context.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+ // Scopes or granted authorities the client currently possesses extracted from
+ // token or application security context. The value would come from the scope
+ // associated with an [OAuth 2.0 Access Token](https://tools.ietf.org/html/rfc6749#section-3.3)
+ // or an attribute value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
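+
+// A minimal illustrative sketch (not part of the generated conventions): after
+// authenticating an inbound request, the end-user attributes could be filled
+// from the verified token. The `span` and `claims` variables are hypothetical.
+//
+//	span.SetAttributes(
+//		EnduserIDKey.String(claims.Subject),
+//		EnduserRoleKey.String("admin"),
+//		EnduserScopeKey.String("read:message, write:files"),
+//	)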
+
+// These attributes may be used for any operation to store information about a thread that started a span.
+const (
+ // Current "managed" thread ID (as opposed to OS thread ID).
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+ // Current thread name.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// These attributes allow reporting the unit of code that created the span, providing more context about it.
+const (
+ // The method or function name, or equivalent (usually rightmost part of the code
+ // unit's name).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+ // The "namespace" within which `code.function` is defined. Usually the qualified
+ // class or module name, such that `code.namespace` + some separator +
+ // `code.function` form a unique identifier for the code unit.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+ // The source code file name that identifies the code unit as uniquely as possible
+ // (preferably an absolute file path).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+ // The line number in `code.filepath` best representing the operation. It SHOULD
+ // point within the code unit named in `code.function`.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+)
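+
+// A minimal illustrative sketch (not part of the generated conventions): the
+// code.* attributes can be derived with the standard library's runtime package.
+// Note that runtime.FuncForPC returns a fully qualified name; splitting it into
+// `code.namespace` and `code.function` is omitted here. The `span` variable is
+// hypothetical.
+//
+//	if pc, file, line, ok := runtime.Caller(0); ok {
+//		span.SetAttributes(
+//			CodeFunctionKey.String(runtime.FuncForPC(pc).Name()),
+//			CodeFilepathKey.String(file),
+//			CodeLineNumberKey.Int(line),
+//		)
+//	}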
+
+// This document defines semantic conventions for HTTP client and server Spans.
+const (
+ // HTTP request method.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+ // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
+ // Usually the fragment is not transmitted over HTTP, but if it is known, it
+ // should be included nevertheless.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the attribute's
+ // value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+ // The full request target as passed in an HTTP request line or equivalent.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds#123'
+ HTTPTargetKey = attribute.Key("http.target")
+ // The value of the [HTTP host
+ // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header
+ // should also be reported, see note.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'www.example.org'
+ // Note: When the header is present but empty the attribute SHOULD be set to the
+ // empty string. Note that this is a valid situation that is expected in certain
+ // cases, according to the aforementioned [section of RFC
+ // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
+ // set the attribute MUST NOT be set.
+ HTTPHostKey = attribute.Key("http.host")
+ // The URI scheme identifying the used protocol.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+ // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // Required: If and only if one was received/sent.
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+ // Kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
+ // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+ // Value of the [HTTP
+ // User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by
+ // the client.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+ // The size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+ // requests using transport encoding, this should be the compressed size.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+ // The size of the uncompressed request payload body after transport decoding. Not
+ // set if transport encoding not used.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 5493
+ HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed")
+ // The size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+ // requests using transport encoding, this should be the compressed size.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+ // The size of the uncompressed response payload body after transport decoding.
+ // Not set if transport encoding not used.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 5493
+ HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed")
+ // The ordinal number of request re-sending attempt.
+ //
+ // Type: int
+ // Required: If and only if a request was retried.
+ // Stability: stable
+ // Examples: 3
+ HTTPRetryCountKey = attribute.Key("http.retry_count")
+)
+
+var (
+ // HTTP 1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP 1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP 2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
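+
+// A minimal illustrative sketch (not part of the generated conventions): a
+// client-side HTTP span could carry these attributes; the
+// HTTPClientAttributesFromHTTPRequest helper that accompanies these packages
+// derives most of them from an *http.Request. The `span` variable is
+// hypothetical.
+//
+//	span.SetAttributes(
+//		HTTPMethodKey.String("GET"),
+//		HTTPURLKey.String("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+//		HTTPFlavorHTTP11,
+//		HTTPStatusCodeKey.Int(200),
+//	)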
+
+// Semantic Convention for HTTP Server
+const (
+ // The primary server name of the matched virtual host. This should be obtained
+ // via configuration. If no such configuration can be obtained, this attribute
+ // MUST NOT be set (`net.host.name` should be used instead).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `http.url` is usually not readily available on the server side but would
+ // have to be assembled in a cumbersome and sometimes lossy process from other
+ // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus
+ // preferred to supply the raw data that is available.
+ HTTPServerNameKey = attribute.Key("http.server_name")
+ // The matched route (path template).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/users/:userID?'
+ HTTPRouteKey = attribute.Key("http.route")
+ // The IP address of the original client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.peer.ip`, which would
+ // identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.peer.ip` is available, even if that other
+ // source just confirms the same value as `net.peer.ip`.
+ // Rationale: For `net.peer.ip`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.peer.ip` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
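+
+// A minimal illustrative sketch (not part of the generated conventions): on the
+// server side, the HTTPServerAttributesFromHTTPRequest helper that accompanies
+// these packages fills most http.* and net.* attributes from the incoming
+// request. The `span` and `req` variables are hypothetical.
+//
+//	span.SetAttributes(HTTPServerAttributesFromHTTPRequest("example.com", "/users/:userID?", req)...)
+//	span.SetAttributes(HTTPClientIPKey.String("83.164.160.102"))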
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // The keys in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+ // The JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
+ // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+ // The JSON-serialized value of the `ItemCollectionMetrics` response field.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+ // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+ // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+ // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+ // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+ //
+ // Type: double
+ // Required: No
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+ // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // Required: No
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+ // The value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+ // The value of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
+ // ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+ // The value of the `Limit` request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+ // The value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+ // The value of the `IndexName` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+ // The value of the `Select` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// DynamoDB.CreateTable
+const (
+ // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
+ // field
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
+ // number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+ // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// DynamoDB.ListTables
+const (
+ // The value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+ // The number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// DynamoDB.Query
+const (
+ // The value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// DynamoDB.Scan
+const (
+ // The value of the `Segment` request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+ // The value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+ // The value of the `Count` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+ // The value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// DynamoDB.UpdateTable
+const (
+ // The JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+ // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
+ // request field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// This document defines the attributes used in messaging systems.
+const (
+ // A string identifying the messaging system.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+ // The message destination name. This might be equal to the span name but is
+ // required nevertheless.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ MessagingDestinationKey = attribute.Key("messaging.destination")
+ // The kind of message destination
+ //
+ // Type: Enum
+ // Required: Required only if the message destination is either a `queue` or
+ // `topic`.
+ // Stability: stable
+ MessagingDestinationKindKey = attribute.Key("messaging.destination_kind")
+ // A boolean that is true if the message destination is temporary.
+ //
+ // Type: boolean
+ // Required: If missing, it is assumed to be false.
+ // Stability: stable
+ MessagingTempDestinationKey = attribute.Key("messaging.temp_destination")
+ // The name of the transport protocol.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'AMQP', 'MQTT'
+ MessagingProtocolKey = attribute.Key("messaging.protocol")
+ // The version of the transport protocol.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0.9.1'
+ MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version")
+ // Connection string.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'tibjmsnaming://localhost:7222',
+ // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue'
+ MessagingURLKey = attribute.Key("messaging.url")
+ // A value used by the messaging system as an identifier for the message,
+ // represented as a string.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message_id")
+ // The [conversation ID](#conversations) identifying the conversation to which the
+ // message belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingConversationIDKey = attribute.Key("messaging.conversation_id")
+ // The (uncompressed) size of the message payload in bytes. Also use this
+ // attribute if it is unknown whether the compressed or uncompressed payload size
+ // is reported.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes")
+ // The compressed size of the message payload in bytes.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes")
+)
+
+var (
+ // A message sent to a queue
+ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+ // A message sent to a topic
+ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
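+
+// A minimal illustrative sketch (not part of the generated conventions): a
+// producer span sending to a Kafka topic could combine the general messaging
+// attributes with the destination kind. The `span` variable is hypothetical.
+//
+//	span.SetAttributes(
+//		MessagingSystemKey.String("kafka"),
+//		MessagingDestinationKey.String("MyTopic"),
+//		MessagingDestinationKindTopic,
+//		MessagingMessageIDKey.String("452a7c7c7c7048c2f887f61572b18fc2"),
+//	)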
+
+// Semantic convention for a consumer of messages received from a messaging system
+const (
+ // A string identifying the kind of message consumption as defined in the
+ // [Operation names](#operation-names) section above. If the operation is "send",
+ // this attribute MUST NOT be set, since the operation can be inferred from the
+ // span kind in that case.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ MessagingOperationKey = attribute.Key("messaging.operation")
+ // The identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are
+ // present, or only `messaging.kafka.consumer_group`. For brokers, such as
+ // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+ // message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer_id")
+)
+
+var (
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// Attributes for RabbitMQ
+const (
+ // RabbitMQ message routing key.
+ //
+ // Type: string
+ // Required: Unless it is empty.
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key")
+)
+
+// Attributes for Apache Kafka
+const (
+ // Message keys in Kafka are used for grouping similar messages to ensure they're
+ // processed on the same partition. They differ from `messaging.message_id` in
+ // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key")
+ // Name of the Kafka Consumer Group that is handling the message. Only applies to
+ // consumers, not producers.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group")
+ // Client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+ // Partition the message is sent to.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition")
+ // A boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // Required: If missing, it is assumed to be false.
+ // Stability: stable
+ MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone")
+)
+
+// Attributes for Apache RocketMQ
+const (
+ // Namespace of RocketMQ resources; resources in different namespaces are
+ // isolated from each other.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+ // Name of the RocketMQ producer/consumer group that is handling the message. The
+ // client type is identified by the SpanKind.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+ // The unique identifier for each client.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+ // Type of message.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type")
+ // The secondary classifier of message besides topic.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag")
+ // Key(s) of the message, another way to mark the message besides the message ID.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys")
+ // Model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// This document defines semantic conventions for remote procedure calls.
+const (
+ // A string identifying the remoting system. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+ // The full (logical) name of the service being called, including its package
+ // name, if applicable.
+ //
+ // Type: string
+ // Required: No, but recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+ // The name of the (logical) method being called, must be equal to the $method
+ // part in the span name.
+ //
+ // Type: string
+ // Required: No, but recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the latter
+ // (e.g., method actually executing the call on the server side, RPC client stub
+ // method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
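+
+// A minimal illustrative sketch (not part of the generated conventions): a
+// client span for a gRPC call to myservice.EchoService/exampleMethod. The
+// `span` variable is hypothetical.
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC,
+//		RPCServiceKey.String("myservice.EchoService"),
+//		RPCMethodKey.String("exampleMethod"),
+//	)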
+
+// Tech-specific attributes for gRPC.
+const (
+ // The [numeric status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC
+ // request.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC
+ // 1.0 does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // Required: If missing, it is assumed to be "1.0".
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+ // `id` property of the request or response. Since the protocol allows the id to
+ // be an int, a string, `null`, or missing (for notifications), the value is
+ // expected to be cast to a string for simplicity. Use an empty string in case
+ // of a `null` value. Omit entirely if this is a notification.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // Required: If missing, response is assumed to be successful.
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPC received/sent message.
+const (
+ // Whether this is a received or sent message.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+ // MUST be calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+ // Compressed size of the message in bytes.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+ // Uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
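+
+// A minimal illustrative sketch (not part of the generated conventions): per
+// the note on `message.id`, sent and received messages are numbered by two
+// independent counters starting at 1, recorded here as span events. The `span`
+// and `sentCount` variables are hypothetical.
+//
+//	sentCount++
+//	span.AddEvent("message", trace.WithAttributes(
+//		MessageTypeSent,
+//		MessageIDKey.Int(sentCount),
+//	))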
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/README.md
new file mode 100644
index 0000000000..6a273180fe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.12.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.12.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.12.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
new file mode 100644
index 0000000000..fc255ef05d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.12.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go
new file mode 100644
index 0000000000..f0e12957e3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
new file mode 100644
index 0000000000..4e19ca342c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+import (
+ "net/http"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/semconv/internal"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
+
+var sc = &internal.SemanticConventions{
+ EnduserIDKey: EnduserIDKey,
+ HTTPClientIPKey: HTTPClientIPKey,
+ HTTPFlavorKey: HTTPFlavorKey,
+ HTTPHostKey: HTTPHostKey,
+ HTTPMethodKey: HTTPMethodKey,
+ HTTPRequestContentLengthKey: HTTPRequestContentLengthKey,
+ HTTPRouteKey: HTTPRouteKey,
+ HTTPSchemeHTTP: HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: HTTPSchemeHTTPS,
+ HTTPServerNameKey: HTTPServerNameKey,
+ HTTPStatusCodeKey: HTTPStatusCodeKey,
+ HTTPTargetKey: HTTPTargetKey,
+ HTTPURLKey: HTTPURLKey,
+ HTTPUserAgentKey: HTTPUserAgentKey,
+ NetHostIPKey: NetHostIPKey,
+ NetHostNameKey: NetHostNameKey,
+ NetHostPortKey: NetHostPortKey,
+ NetPeerIPKey: NetPeerIPKey,
+ NetPeerNameKey: NetPeerNameKey,
+ NetPeerPortKey: NetPeerPortKey,
+ NetTransportIP: NetTransportIP,
+ NetTransportOther: NetTransportOther,
+ NetTransportTCP: NetTransportTCP,
+ NetTransportUDP: NetTransportUDP,
+ NetTransportUnix: NetTransportUnix,
+}
+
+// NetAttributesFromHTTPRequest generates attributes of the net
+// namespace as specified by the OpenTelemetry specification for a
+// span. The network parameter is a string that net.Dial function
+// from standard library can understand.
+func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
+ return sc.NetAttributesFromHTTPRequest(network, request)
+}
+
+// EndUserAttributesFromHTTPRequest generates attributes of the
+// enduser namespace as specified by the OpenTelemetry specification
+// for a span.
+func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+ return sc.EndUserAttributesFromHTTPRequest(request)
+}
+
+// HTTPClientAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the client side.
+func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+ return sc.HTTPClientAttributesFromHTTPRequest(request)
+}
+
+// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
+// to be used with server-side HTTP metrics.
+func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
+ return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request)
+}
+
+// HTTPServerAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the server side. Currently, only basic authentication is
+// supported.
+func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
+ return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request)
+}
+
+// HTTPAttributesFromHTTPStatusCode generates attributes of the http
+// namespace as specified by the OpenTelemetry specification for a
+// span.
+func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
+ return sc.HTTPAttributesFromHTTPStatusCode(code)
+}
+
+// SpanStatusFromHTTPStatusCode generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
+ return internal.SpanStatusFromHTTPStatusCode(code)
+}
+
+// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+// Exclude 4xx for SERVER to set the appropriate status.
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
+ return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind)
+}
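+
+// A minimal illustrative sketch (not part of the generated conventions): wiring
+// the helpers above into a server-side handler. The `span` variable is
+// hypothetical (it would normally come from the request context).
+//
+//	func handle(w http.ResponseWriter, r *http.Request) {
+//		span.SetAttributes(NetAttributesFromHTTPRequest("tcp", r)...)
+//		span.SetAttributes(HTTPServerAttributesFromHTTPRequest("example.com", "/users/:userID?", r)...)
+//		code, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(http.StatusOK, trace.SpanKindServer)
+//		span.SetStatus(code, msg)
+//	}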
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
new file mode 100644
index 0000000000..45951685a7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
@@ -0,0 +1,1031 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device).
+const (
+ // Array of brand name and version separated by a space
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (navigator.userAgentData.brands).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+ // The platform on which the browser is running
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (navigator.userAgentData.platform). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+ // be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in the
+ // [os.type and os.name attributes](./os.md). However, for consistency, the values
+ // in the `browser.platform` attribute should capture the exact value that the
+ // user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+ // Full user-agent string provided by the browser
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36
+ // (KHTML, '
+ // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+ // Note: The user-agent value SHOULD be provided only from browsers that do not
+ // have a mechanism to retrieve brands and platform individually from the User-
+ // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent`
+ // API can be used.
+ BrowserUserAgentKey = attribute.Key("browser.user_agent")
+)
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // Name of the cloud provider.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+ // The cloud account ID the resource is assigned to.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+ // The geographical region the resource is running in.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm),
+ // [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud regions](https://intl.cloud.tencent.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+ // Cloud regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the resource
+ // is running.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+ // The cloud platform in use.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // The Amazon Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-
+ // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+ // The ARN of an [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+ // The [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+ // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates
+ // t/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-
+ // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+ // The task definition family this task definition is a member of.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+ // The revision for this task definition.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // The ARN of an EKS cluster.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// Resources specific to Amazon Web Services.
+const (
+ // The name(s) of the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like multi-container
+ // applications, where a single application has sidecar containers, and each
+ // writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+ // The Amazon Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+ // access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+ // The name(s) of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+ // The ARN(s) of the AWS log stream(s).
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-
+ // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+ // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain
+ // several log streams, so these ARNs necessarily identify both a log group and a
+ // log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
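+
+// Usage sketch: the keys above are string[] typed, so values are set with the
+// slice constructors on attribute.Key; for example (values illustrative):
+//
+//    attrs := []attribute.KeyValue{
+//        semconv.AWSLogGroupNamesKey.StringSlice([]string{"/aws/lambda/my-function"}),
+//        semconv.AWSLogStreamNamesKey.StringSlice([]string{"logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"}),
+//    }
+//    _ = attrs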
+
+// A container instance.
+const (
+ // Container name used by container runtime.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+ // Container ID. Usually a UUID, as for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-
+ // identification). The UUID might be abbreviated.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+ // The container runtime managing this container.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+ // Name of the image the container was built on.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+ // Container image tag.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// The software deployment.
+const (
+ // Name of the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// The device on which the process represented by this resource is running.
+const (
+ // A unique identifier representing the device
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values outlined
+ // below. This value is not an advertising identifier and MUST NOT be used as
+ // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id
+ // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden
+ // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the
+ // Firebase Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on best
+ // practices and exact implementation details. Caution should be taken when
+ // storing personal data or anything which can identify a user. GDPR and data
+ // protection laws may apply, ensure you do your own due diligence.
+ DeviceIDKey = attribute.Key("device.id")
+ // The model identifier for the device
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version of the
+ // model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+ // The marketing name for the device model
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of the
+ // device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+ // The name of the device manufacturer
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// A serverless instance.
+const (
+ // The name of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
+ // general.md#source-code-attributes)
+ // span attributes).
+
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `faas.id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+ // The unique ID of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the full ID
+ // at startup,
+ // so consider setting `faas.id` as a span attribute instead.
+
+ // The exact value to use for `faas.id` depends on the cloud provider:
+
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
+ // namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+ // aliases.html)
+ // with the resolved function version, as the same runtime instance may be
+ // invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
+ // resource-names)
+ // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
+ // us/rest/api/resources/resources/get-by-id) of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/
+ // Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function app can
+ // host multiple functions that would usually share
+ // a TracerProvider.
+ FaaSIDKey = attribute.Key("faas.id")
+ // The immutable version of the function being executed.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+ // versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-
+ // var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+ // The execution environment ID as a string, that will be potentially reused for
+ // other invocations to the same function/function version.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+ // The amount of memory available to the serverless function in MiB.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 128
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information.
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// A host is defined as a general computing instance.
+const (
+ // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
+ // provider.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostIDKey = attribute.Key("host.id")
+ // Name of the host. On Unix systems, it may contain what the hostname command
+ // returns, or the fully qualified hostname, or another name specified by the
+ // user.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+ // Type of host. For Cloud, this must be the machine type.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+ // The CPU architecture the host system is running on.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+ // Name of the VM image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+ // VM image ID. For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+ // The version string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
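+
+// Usage sketch: one plausible way to populate host.arch is to map the standard
+// library's runtime.GOARCH onto the enum values above (only a few arches
+// shown; unlisted values fall back to the raw GOARCH string):
+//
+//    func hostArch() attribute.KeyValue {
+//        switch runtime.GOARCH {
+//        case "amd64":
+//            return semconv.HostArchAMD64
+//        case "arm64":
+//            return semconv.HostArchARM64
+//        case "386":
+//            return semconv.HostArchX86
+//        default:
+//            return semconv.HostArchKey.String(runtime.GOARCH)
+//        }
+//    }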
+
+// A Kubernetes Cluster.
+const (
+ // The name of the cluster.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// A Kubernetes Node object.
+const (
+ // The name of the Node.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+ // The UID of the Node.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// A Kubernetes Namespace.
+const (
+ // The name of the namespace that the pod is running in.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// A Kubernetes Pod object.
+const (
+ // The UID of the Pod.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+ // The name of the Pod.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // The name of the Container from Pod specification, must be unique within a Pod.
+ // The container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+ // Number of times the container was restarted. This attribute can be used to
+ // identify a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// A Kubernetes ReplicaSet object.
+const (
+ // The UID of the ReplicaSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+ // The name of the ReplicaSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// A Kubernetes Deployment object.
+const (
+ // The UID of the Deployment.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+ // The name of the Deployment.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// A Kubernetes StatefulSet object.
+const (
+ // The UID of the StatefulSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+ // The name of the StatefulSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// A Kubernetes DaemonSet object.
+const (
+ // The UID of the DaemonSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+ // The name of the DaemonSet.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// A Kubernetes Job object.
+const (
+ // The UID of the Job.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+ // The name of the Job.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// A Kubernetes CronJob object.
+const (
+ // The UID of the CronJob.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+ // The name of the CronJob.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// The operating system (OS) on which the process represented by this resource is running.
+const (
+ // The operating system type.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+ // Human readable (not intended to be parsed) OS version information, as
+ // reported by, for example, the `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+ // Human readable operating system name.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+ // The version string of the operating system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
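+
+// Usage sketch: os.type can be derived from the standard library's
+// runtime.GOOS, several of whose values ("windows", "linux", "darwin",
+// "freebsd", ...) match the enum strings above; a partial mapping:
+//
+//    func osType() attribute.KeyValue {
+//        switch runtime.GOOS {
+//        case "windows":
+//            return semconv.OSTypeWindows
+//        case "darwin":
+//            return semconv.OSTypeDarwin
+//        case "linux":
+//            return semconv.OSTypeLinux
+//        default:
+//            return semconv.OSTypeKey.String(runtime.GOOS)
+//        }
+//    }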
+
+// An operating system process.
+const (
+ // Process identifier (PID).
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+ // The name of the process executable. On Linux based systems, can be set to the
+ // `Name` in `/proc/[pid]/status`. On Windows, can be set to the base name of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // Required: See below
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+ // The full path to the process executable. On Linux based systems, can be set to
+ // the target of `/proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // Required: See below
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+ // The command used to launch the process (i.e. the command name). On Linux based
+ // systems, can be set to the zeroth string in `/proc/[pid]/cmdline`. On Windows,
+ // can be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // Required: See below
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+ // The full command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not
+ // set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // Required: See below
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+ // All the command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited strings
+ // extracted from `/proc/[pid]/cmdline`. For libc-based executables, this would be
+ // the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // Required: See below
+ // Stability: stable
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+ // The username of the user that owns the process.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// The single (language) runtime instance which is monitored.
+const (
+ // The name of the runtime of this process. For compiled native binaries, this
+ // SHOULD be the name of the compiler.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+ // The version of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+ // An additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// A service instance.
+const (
+ // Logical name of the service.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fall back to `unknown_service:`
+ // concatenated with [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available, the
+ // value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+ // A namespace for `service.name`.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace defined
+ // (so the empty/unspecified namespace is simply one more valid namespace). Zero-
+ // length namespace string is assumed equal to unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+ // The string ID of the service instance.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to distinguish instances of the same service that exist
+ // at the same time (e.g. instances of a horizontally scaled service). It is
+ // preferable for the ID to be persistent and stay the same for the lifetime of
+ // the service instance, however it is acceptable that the ID is ephemeral and
+ // changes during important lifetime events for the service (e.g. service
+ // restarts). If the service has no inherent unique ID that can be used as the
+ // value of this attribute it is recommended to generate a random Version 1 or
+ // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+ // The version string of the service API or implementation.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
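+
+// Usage sketch: the service.name fallback described in the note above can be
+// implemented with os.Executable; serviceName here is a hypothetical helper,
+// not part of this package:
+//
+//    func serviceName(configured string) attribute.KeyValue {
+//        if configured != "" {
+//            return semconv.ServiceNameKey.String(configured)
+//        }
+//        if exe, err := os.Executable(); err == nil {
+//            return semconv.ServiceNameKey.String("unknown_service:" + filepath.Base(exe))
+//        }
+//        return semconv.ServiceNameKey.String("unknown_service")
+//    }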
+
+// The telemetry SDK used to capture data recorded by the instrumentation libraries.
+const (
+ // The name of the telemetry SDK as defined above.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+ // The language of the telemetry SDK.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+ // The version string of the telemetry SDK.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+ // The version string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime.
+const (
+ // The name of the web engine.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+ // The version of the web engine.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+ // Additional description of the web engine (e.g. detailed version and edition
+ // information).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go
new file mode 100644
index 0000000000..f01d515bc2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+ // non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.12.0"
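+
+// Usage sketch: SchemaURL is intended to accompany resources built from this
+// package's keys, e.g. when configuring an SDK tracer provider (sdktrace
+// standing for go.opentelemetry.io/otel/sdk/trace):
+//
+//    res := resource.NewWithAttributes(semconv.SchemaURL,
+//        semconv.ServiceNameKey.String("shoppingcart"))
+//    tp := sdktrace.NewTracerProvider(sdktrace.WithResource(res))
+//    _ = tp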
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
new file mode 100644
index 0000000000..70c25dc210
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
@@ -0,0 +1,1693 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
+const (
+ // The full invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next`
+ // response, where applicable).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used.
+const (
+ // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec
+ // .md#id) uniquely identifies the event.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+ // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m
+ // d#source-1) identifies the context in which an event happened.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my-
+ // service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+ // The [version of the CloudEvents specification](https://github.com/cloudevents/s
+ // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+ // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp
+ // ec.md#type) contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+ // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.
+ // md#subject) of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
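+
+// Usage sketch: these are span attributes; given an active trace.Span they
+// might be attached as follows (values illustrative):
+//
+//    span.SetAttributes(
+//        semconv.CloudeventsEventIDKey.String("123e4567-e89b-12d3-a456-426614174000"),
+//        semconv.CloudeventsEventSourceKey.String("https://github.com/cloudevents"),
+//        semconv.CloudeventsEventSpecVersionKey.String("1.0"),
+//        semconv.CloudeventsEventTypeKey.String("com.example.object.deleted.v2"),
+//    )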
+
+// This document defines semantic conventions for the OpenTracing Shim
+const (
+ // Parent-child Reference type
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// This document defines the attributes used to perform database client calls.
+const (
+ // An identifier for the database management system (DBMS) product being used. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+ // The connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+ // Username for accessing the database.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+ // The fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+ // used to connect.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+ // This attribute is used to report the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target database
+ // (even if the command fails).
+ //
+ // Type: string
+ // Required: Required, if applicable.
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called "schema
+ // name". In case there are multiple layers that could be considered for database
+ // name (e.g. Oracle instance name and schema name), the database name to be used
+ // is the more specific layer (e.g. Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+ // The database statement being executed.
+ //
+ // Type: string
+ // Required: Required if applicable and not explicitly disabled via
+ // instrumentation configuration.
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+ // The name of the operation being executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // Required: Required, if `db.statement` is not applicable.
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to attempt any
+ // client-side parsing of `db.statement` just to get this property, but it should
+ // be set if the operation name is provided by the library being instrumented. If
+ // the SQL statement has an ambiguous operation, or performs more than one
+ // operation, this value may be omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+)
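+
+// Usage sketch: a database client span typically combines db.system with the
+// call-level keys defined above (values illustrative):
+//
+//    span.SetAttributes(
+//        semconv.DBSystemPostgreSQL,
+//        semconv.DBNameKey.String("customers"),
+//        semconv.DBStatementKey.String("SELECT * FROM wuser_table"),
+//        semconv.DBOperationKey.String("SELECT"),
+//    )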
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-
+ // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named instance.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// Call-level attributes for Cassandra
+const (
+ // The fetch size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+ // The consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-
+ // oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+ // The name of the primary table that the operation is acting upon, including the
+ // keyspace name (if applicable).
+ //
+ // Type: string
+ // Required: Recommended if available.
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references Cassandra rather
+ // than SQL. It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting upon an
+ // anonymous table, or more than one table, this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+ // Whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+ // The number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+ // The ID of the coordinating node for a query.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+ // The data center of the coordinating node for a query.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Call-level attributes for Redis
+const (
+ // The index of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To be used
+ // instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // Required: Required, if other than the default database (`0`).
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// Call-level attributes for MongoDB
+const (
+ // The collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// Call-level attributes for SQL databases
+const (
+ // The name of the primary table that the operation is acting upon, including the
+ // database name (if applicable).
+ //
+ // Type: string
+ // Required: Recommended if available.
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting upon an
+ // anonymous table, or more than one table, this value MUST NOT be set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// This document defines the attributes used to report a single exception associated with a span.
+const (
+ // The type of the exception (its fully-qualified class name, if applicable). The
+ // dynamic type of the exception should be preferred over the static type in
+ // languages that support it.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+ // The exception message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+ // A stacktrace as a string in the natural representation for the language
+ // runtime. The representation is to be determined and documented by each language
+ // SIG.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+ // SHOULD be set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of a span,
+ // if that span is ended while the exception is still logically "in flight".
+ // This may be actually "in flight" in some languages (e.g. if the exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most languages.
+
+ // It is usually not possible to determine at the point where an exception is
+ // thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending the span,
+ // as done in the [example above](#recording-an-exception).
+
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
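+
+// Usage sketch: in the otel-go SDK these keys are normally filled in by
+// span.RecordError, which emits an "exception" event; a hand-rolled
+// equivalent, assuming go.opentelemetry.io/otel/trace is imported:
+//
+//    span.RecordError(err, trace.WithStackTrace(true))
+//    // or, setting the event attributes explicitly:
+//    span.AddEvent("exception", trace.WithAttributes(
+//        semconv.ExceptionTypeKey.String("OSError"),
+//        semconv.ExceptionMessageKey.String("Division by zero"),
+//    ))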
+
+// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
+const (
+ // Type of the trigger which caused this function execution.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that corresponding incoming would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+ // The execution ID of the current function execution.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
+const (
+ // The name of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+ // Describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+ // A string containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+ // in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+ // The document name/table subjected to the operation. For example, in Cloud
+ // Storage or S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // A string containing the function invocation time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+ // in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+ // A string containing the schedule period as [Cron Expression](https://docs.oracl
+ // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // A boolean that is true if the serverless function is executed for the first
+ // time (aka cold-start).
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // The name of the invoked function.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+ // The cloud provider of the invoked function.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked
+ // function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+ // The cloud region of the invoked function.
+ //
+ // Type: string
+ // Required: For some cloud providers, like AWS or GCP, the region in which a
+ // function is hosted is essential to uniquely identify the function and also part
+ // of its endpoint. Since it's part of the endpoint being called, the region is
+ // always known to clients. In these cases, `faas.invoked_region` MUST be set
+ // accordingly. If the region is unknown to the client or not required for
+ // identifying the invoked function, setting `faas.invoked_region` is optional.
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// These attributes may be used for any network related operation.
+const (
+ // Transport protocol used. See note below.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+ // Remote address of the peer (dotted decimal for IPv4 or
+ // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6)
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '127.0.0.1'
+ NetPeerIPKey = attribute.Key("net.peer.ip")
+ // Remote port number.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+ // Remote hostname or similar, see note below.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra
+ // DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+ // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetHostIPKey = attribute.Key("net.host.ip")
+ // Like `net.peer.port` but for the host port.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 35555
+ NetHostPortKey = attribute.Key("net.host.port")
+ // Local hostname or similar, see note below.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+ // The internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+ // This describes more details regarding the connection.type. It may be the type
+ // of cell technology connection, but it could also be used to describe details
+ // of a wifi connection.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+ // The name of the mobile carrier.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+ // The mobile carrier country code.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+ // The mobile carrier network code.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+ // The ISO 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Another IP-based protocol
+ NetTransportIP = NetTransportKey.String("ip")
+ // Unix Domain socket. See below
+ NetTransportUnix = NetTransportKey.String("unix")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
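+
+// netAttributesExample is an editor's illustrative sketch, not part of the
+// upstream generated file: it shows how the net.* keys above compose into a
+// set of span attributes. The concrete values are hypothetical.
+func netAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		NetTransportTCP,                  // transport enum value defined above
+		NetPeerIPKey.String("127.0.0.1"), // remote address of the peer
+		NetPeerPortKey.Int(443),          // remote port number
+		NetHostConnectionTypeWifi,        // host connection type
+	}
+}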
+
+// Operations that access some remote service.
+const (
+ // The [`service.name`](../../resource/semantic_conventions/README.md#service) of
+ // the remote service. SHOULD be equal to the actual `service.name` resource
+ // attribute of the remote service if any.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// These attributes may be used for any operation with an authenticated and/or authorized enduser.
+const (
+ // Username or client_id extracted from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the
+ // inbound request from outside the system.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+	// Actual/assumed role the client is making the request under, extracted from
+	// the token or application security context.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+ // Scopes or granted authorities the client currently possesses extracted from
+ // token or application security context. The value would come from the scope
+ // associated with an [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value
+	// in a [SAML 2.0 Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// These attributes may be used for any operation to store information about a thread that started a span.
+const (
+ // Current "managed" thread ID (as opposed to OS thread ID).
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+ // Current thread name.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// These attributes allow reporting this unit of code and therefore provide more context about the span.
+const (
+ // The method or function name, or equivalent (usually rightmost part of the code
+ // unit's name).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+ // The "namespace" within which `code.function` is defined. Usually the qualified
+ // class or module name, such that `code.namespace` + some separator +
+ // `code.function` form a unique identifier for the code unit.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+ // The source code file name that identifies the code unit as uniquely as possible
+ // (preferably an absolute file path).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+ // The line number in `code.filepath` best representing the operation. It SHOULD
+ // point within the code unit named in `code.function`.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+)
+
+// This document defines semantic conventions for HTTP client and server Spans.
+const (
+ // HTTP request method.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+ // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
+ // Usually the fragment is not transmitted over HTTP, but if it is known, it
+ // should be included nevertheless.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the attribute's
+ // value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+	// The full request target as passed in an HTTP request line or equivalent.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds#123'
+ HTTPTargetKey = attribute.Key("http.target")
+ // The value of the [HTTP host
+ // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header
+ // should also be reported, see note.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'www.example.org'
+ // Note: When the header is present but empty the attribute SHOULD be set to the
+ // empty string. Note that this is a valid situation that is expected in certain
+	// cases, according to the aforementioned [section of RFC
+ // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
+ // set the attribute MUST NOT be set.
+ HTTPHostKey = attribute.Key("http.host")
+ // The URI scheme identifying the used protocol.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+ // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // Required: If and only if one was received/sent.
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+ // Kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
+ // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+	// Value of the [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3)
+	// header sent by the client.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+ // The size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+ // requests using transport encoding, this should be the compressed size.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+ // The size of the uncompressed request payload body after transport decoding. Not
+ // set if transport encoding not used.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 5493
+ HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed")
+ // The size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+ // requests using transport encoding, this should be the compressed size.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+ // The size of the uncompressed response payload body after transport decoding.
+ // Not set if transport encoding not used.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 5493
+ HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed")
+ // The ordinal number of request re-sending attempt.
+ //
+ // Type: int
+ // Required: If and only if a request was retried.
+ // Stability: stable
+ // Examples: 3
+ HTTPRetryCountKey = attribute.Key("http.retry_count")
+)
+
+var (
+ // HTTP/1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// Semantic Convention for HTTP Server
+const (
+ // The primary server name of the matched virtual host. This should be obtained
+ // via configuration. If no such configuration can be obtained, this attribute
+	// MUST NOT be set (`net.host.name` should be used instead).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `http.url` is usually not readily available on the server side but would
+ // have to be assembled in a cumbersome and sometimes lossy process from other
+ // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus
+ // preferred to supply the raw data that is available.
+ HTTPServerNameKey = attribute.Key("http.server_name")
+ // The matched route (path template).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/users/:userID?'
+ HTTPRouteKey = attribute.Key("http.route")
+ // The IP address of the original client behind all proxies, if known (e.g. from
+	// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '83.164.160.102'
+	// Note: This is not necessarily the same as `net.peer.ip`, which would
+	// identify the network-level peer, which may be a proxy.
+	//
+	// This attribute should be set when a source of information different
+	// from the one used for `net.peer.ip` is available, even if that other
+	// source just confirms the same value as `net.peer.ip`.
+	// Rationale: For `net.peer.ip`, one typically does not know if it
+	// comes from a proxy, reverse proxy, or the actual client. Setting
+	// `http.client_ip` when it's the same as `net.peer.ip` means that
+	// one is at least somewhat confident that the address is not that of
+	// the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
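+
+// httpServerAttributesExample is an editor's illustrative sketch, not part
+// of the upstream generated file: it shows typical attributes for an HTTP
+// server span. The route and status code are hypothetical.
+func httpServerAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HTTPMethodKey.String("GET"),            // required for HTTP spans
+		HTTPSchemeKey.String("https"),
+		HTTPRouteKey.String("/users/:userID?"), // matched path template
+		HTTPStatusCodeKey.Int(200),             // only if a status was received/sent
+		HTTPFlavorHTTP11,                       // protocol kind, defined above
+	}
+}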
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // The keys in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+ // The JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
+ // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+ // The JSON-serialized value of the `ItemCollectionMetrics` response field.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+ // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+ // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+ // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+ // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+ //
+ // Type: double
+ // Required: No
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+ // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // Required: No
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+ // The value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+ // The value of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
+ // ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+ // The value of the `Limit` request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+ // The value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+ // The value of the `IndexName` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+ // The value of the `Select` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
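+
+// dynamoDBQueryAttributesExample is an editor's illustrative sketch, not
+// part of the upstream generated file: it shows how a DynamoDB Query span
+// might use the common keys above. Table and index names are hypothetical.
+func dynamoDBQueryAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
+		AWSDynamoDBIndexNameKey.String("name_to_group"),
+		AWSDynamoDBLimitKey.Int(10),
+		AWSDynamoDBConsistentReadKey.Bool(true),
+	}
+}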
+
+// DynamoDB.CreateTable
+const (
+ // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
+ // field
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
+ // number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+ // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// DynamoDB.ListTables
+const (
+ // The value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+ // The number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// DynamoDB.Query
+const (
+ // The value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// DynamoDB.Scan
+const (
+ // The value of the `Segment` request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+ // The value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+ // The value of the `Count` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+ // The value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// DynamoDB.UpdateTable
+const (
+ // The JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+ // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
+ // request field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// This document defines the attributes used in messaging systems.
+const (
+ // A string identifying the messaging system.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+ // The message destination name. This might be equal to the span name but is
+ // required nevertheless.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ MessagingDestinationKey = attribute.Key("messaging.destination")
+ // The kind of message destination
+ //
+ // Type: Enum
+ // Required: Required only if the message destination is either a `queue` or
+ // `topic`.
+ // Stability: stable
+ MessagingDestinationKindKey = attribute.Key("messaging.destination_kind")
+ // A boolean that is true if the message destination is temporary.
+ //
+ // Type: boolean
+ // Required: If missing, it is assumed to be false.
+ // Stability: stable
+ MessagingTempDestinationKey = attribute.Key("messaging.temp_destination")
+ // The name of the transport protocol.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'AMQP', 'MQTT'
+ MessagingProtocolKey = attribute.Key("messaging.protocol")
+ // The version of the transport protocol.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0.9.1'
+ MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version")
+ // Connection string.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'tibjmsnaming://localhost:7222',
+ // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue'
+ MessagingURLKey = attribute.Key("messaging.url")
+ // A value used by the messaging system as an identifier for the message,
+ // represented as a string.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message_id")
+ // The [conversation ID](#conversations) identifying the conversation to which the
+ // message belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingConversationIDKey = attribute.Key("messaging.conversation_id")
+ // The (uncompressed) size of the message payload in bytes. Also use this
+ // attribute if it is unknown whether the compressed or uncompressed payload size
+ // is reported.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes")
+ // The compressed size of the message payload in bytes.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes")
+)
+
+var (
+ // A message sent to a queue
+ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+ // A message sent to a topic
+ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
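+
+// messagingAttributesExample is an editor's illustrative sketch, not part of
+// the upstream generated file: it shows attributes for a message sent to a
+// queue. The system, destination, and message ID values are hypothetical.
+func messagingAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		MessagingSystemKey.String("rabbitmq"),     // required
+		MessagingDestinationKey.String("MyQueue"), // required
+		MessagingDestinationKindQueue,             // enum value defined above
+		MessagingMessageIDKey.String("452a7c7c7c7048c2f887f61572b18fc2"),
+	}
+}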
+
+// Semantic convention for a consumer of messages received from a messaging system
+const (
+ // A string identifying the kind of message consumption as defined in the
+ // [Operation names](#operation-names) section above. If the operation is "send",
+ // this attribute MUST NOT be set, since the operation can be inferred from the
+ // span kind in that case.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ MessagingOperationKey = attribute.Key("messaging.operation")
+ // The identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are
+ // present, or only `messaging.kafka.consumer_group`. For brokers, such as
+ // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+ // message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer_id")
+)
+
+var (
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// Attributes for RabbitMQ
+const (
+ // RabbitMQ message routing key.
+ //
+ // Type: string
+ // Required: Unless it is empty.
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key")
+)
+
+// Attributes for Apache Kafka
+const (
+ // Message keys in Kafka are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from `messaging.message_id` in
+ // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key")
+ // Name of the Kafka Consumer Group that is handling the message. Only applies to
+ // consumers, not producers.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group")
+ // Client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+ // Partition the message is sent to.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition")
+ // A boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // Required: If missing, it is assumed to be false.
+ // Stability: stable
+ MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone")
+)
+
+// Attributes for Apache RocketMQ
+const (
+	// Namespace of RocketMQ resources; resources in different namespaces are
+	// isolated from each other.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+ // Name of the RocketMQ producer/consumer group that is handling the message. The
+ // client type is identified by the SpanKind.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+ // The unique identifier for each client.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+ // Type of message.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type")
+ // The secondary classifier of message besides topic.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag")
+	// Key(s) of the message, another way to mark a message besides its message ID.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys")
+ // Model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// This document defines semantic conventions for remote procedure calls.
+const (
+ // A string identifying the remoting system. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+ // The full (logical) name of the service being called, including its package
+ // name, if applicable.
+ //
+ // Type: string
+ // Required: No, but recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+	// The name of the (logical) method being called; it must be equal to the
+	// `$method` part in the span name.
+ //
+ // Type: string
+ // Required: No, but recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the latter
+ // (e.g., method actually executing the call on the server side, RPC client stub
+ // method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
+
+// Tech-specific attributes for gRPC.
+const (
+ // The [numeric status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC
+ // request.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
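+
+// grpcAttributesExample is an editor's illustrative sketch, not part of the
+// upstream generated file: it shows the attributes for a successful gRPC
+// call, where the numeric status code 0 (OK) is recorded via the enum value
+// above. Service and method names are hypothetical.
+func grpcAttributesExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		RPCSystemGRPC,
+		RPCServiceKey.String("myservice.EchoService"),
+		RPCMethodKey.String("Echo"),
+		RPCGRPCStatusCodeOk, // numeric gRPC status code 0
+	}
+}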
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC
+ // 1.0 does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // Required: If missing, it is assumed to be "1.0".
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+	// `id` property of the request or response. Since the protocol allows the id
+	// to be an int, a string, `null`, or missing (for notifications), the value
+	// is expected to be cast to a string for simplicity. Use an empty string for
+	// a `null` value. Omit entirely if this is a notification.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // Required: If missing, response is assumed to be successful.
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPC received/sent message.
+const (
+ // Whether this is a received or sent message.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+	// MUST be calculated as two different counters starting from `1`, one for
+	// sent messages and one for received messages.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+ // Compressed size of the message in bytes.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+ // Uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
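+
+// rpcMessageEventExample is an editor's illustrative sketch, not part of the
+// upstream generated file: attributes for the n-th sent message on a stream,
+// with message.id kept as a per-direction counter starting from 1.
+func rpcMessageEventExample(sentCount int) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		MessageTypeSent,             // "SENT"
+		MessageIDKey.Int(sentCount), // counter per direction, starting from 1
+	}
+}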
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
new file mode 100644
index 0000000000..87b842c5d1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.17.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 0000000000..e087c9c04d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
new file mode 100644
index 0000000000..c7b804bbe2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant,
+	// provides a means for referring to a value without including the value
+	// itself. This can provide additional context for understanding the
+	// meaning behind a value. For example, the variant `red` may be used
+	// for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. The string representation of the
+	// value should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
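+
+// featureFlagEventExample is an editor's illustrative sketch, not part of
+// the upstream generated file: it composes the helper functions above into
+// the attributes of a feature-flag evaluation event. The flag key, provider
+// name, and variant are hypothetical.
+func featureFlagEventExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		FeatureFlagKey("logo-color"),            // required
+		FeatureFlagProviderName("Flag Manager"), // recommended
+		FeatureFlagVariant("red"),               // e.g. the variant for value #c05543
+	}
+}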
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or
+	// sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two different
+	// counters starting from `1`, one for sent messages and one for
+	// received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope
+	// of a span if that span is ended while the exception is still
+	// logically "in flight". This may be actually "in flight" in some
+	// languages (e.g. if the exception is passed to a Context manager's
+	// `__exit__` method in Python) but will usually be caught at the point
+	// of recording the exception in most languages.
+	//
+	// It is usually not possible to determine at the point where an
+	// exception is thrown whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception will escape, if one
+	// checks for an active exception just before ending the span, as done
+	// in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
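+
+// escapedExceptionEventExample is an editor's illustrative sketch, not part
+// of the upstream generated file: when an exception is recorded just before
+// a span ends while still "in flight", exception.escaped is set to true.
+func escapedExceptionEventExample() []attribute.KeyValue {
+	return []attribute.KeyValue{ExceptionEscaped(true)}
+}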
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
new file mode 100644
index 0000000000..137acc67de
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
new file mode 100644
index 0000000000..d318221e59
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
new file mode 100644
index 0000000000..7e365e82ce
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -0,0 +1,1999 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserUserAgentKey is the attribute Key conforming to the
+ // "browser.user_agent" semantic conventions. It represents the full
+ // user-agent string provided by the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+	// Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)
+	// AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+ // Note: The user-agent value SHOULD be provided only from browsers that do
+ // not have a mechanism to retrieve brands and platform individually from
+ // the User-Agent Client Hints API. To retrieve the value, the legacy
+ // `navigator.userAgent` API can be used.
+ BrowserUserAgentKey = attribute.Key("browser.user_agent")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserUserAgent returns an attribute KeyValue conforming to the
+// "browser.user_agent" semantic conventions. It represents the full user-agent
+// string provided by the browser
+func BrowserUserAgent(val string) attribute.KeyValue {
+ return BrowserUserAgentKey.String(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
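+
+// browserResourceExample is an editor's illustrative sketch, not part of the
+// upstream generated file: it assembles browser.* resource attributes with
+// the helpers above, using values that mirror the documented examples.
+func browserResourceExample() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		BrowserBrands("Chromium 99", "Chrome 99"),
+		BrowserPlatform("macOS"),
+		BrowserMobile(false),
+		BrowserLanguage("en-US"),
+	}
+}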
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://intl.cloud.tencent.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the zone
+// within a region where the resource is running; cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
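+
+// A minimal usage sketch, assuming the go.opentelemetry.io/otel/sdk/resource
+// package and this package's SchemaURL constant: the cloud attributes above
+// are typically attached to an SDK resource describing the monitored entity.
+//
+//    res := resource.NewWithAttributes(
+//        SchemaURL,
+//        CloudProviderAWS,
+//        CloudPlatformAWSEKS,
+//        CloudAccountID("123456789012"),
+//        CloudRegion("us-west-2"),
+//        CloudAvailabilityZone("us-west-2a"),
+//    )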
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
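+
+// As a sketch, a Fargate task could be described by combining the enum value
+// with the string-valued helpers (the ARNs below are placeholders taken from
+// the examples above):
+//
+//    attrs := []attribute.KeyValue{
+//        AWSECSLaunchtypeFargate,
+//        AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//        AWSECSTaskFamily("opentelemetry-family"),
+//        AWSECSTaskRevision("8"),
+//    }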
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
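+
+// The slice-valued helpers above are variadic, so multiple log groups or
+// streams can be recorded in a single attribute, for example:
+//
+//    kv := AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service")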
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier for the device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `faas.id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+ // conventions. It represents the unique ID of the single function that
+ // this runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so consider setting `faas.id` as a span attribute instead.
+ //
+ // The exact value to use for `faas.id` depends on the cloud provider:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ FaaSIDKey = attribute.Key("faas.id")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, which will potentially be reused for other invocations of the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function in MiB.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 128
+ // Note: It's recommended to set this attribute since, for example, too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic
+// conventions. It represents the unique ID of the single function that this
+// runtime instance executes.
+func FaaSID(val string) attribute.KeyValue {
+ return FaaSIDKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations of the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function in MiB.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
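+
+// On AWS Lambda, a sketch of deriving FaaSMaxMemory from the
+// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` environment variable mentioned above
+// (assuming the os and strconv packages; error handling omitted):
+//
+//    mem, _ := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
+//    kv := FaaSMaxMemory(mem)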
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // Linux systems, the `machine-id` located in `/etc/machine-id` or
+ // `/var/lib/dbus/machine-id` may be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
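+
+// A sketch of deriving the host architecture from the Go runtime; note that
+// runtime.GOARCH values do not all match the enum values above, so a mapping
+// is needed (the default branch records the raw value as a fallback):
+//
+//    var arch attribute.KeyValue
+//    switch runtime.GOARCH {
+//    case "amd64":
+//        arch = HostArchAMD64
+//    case "arm64":
+//        arch = HostArchARM64
+//    case "arm":
+//        arch = HostArchARM32
+//    case "386":
+//        arch = HostArchX86
+//    case "s390x":
+//        arch = HostArchS390x
+//    default:
+//        arch = HostArchKey.String(runtime.GOARCH)
+//    }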
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
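+
+// A sketch of populating Pod attributes from environment variables injected
+// via the Kubernetes Downward API (the variable names here are a
+// deployment-specific convention, not part of these semantic conventions):
+//
+//    attrs := []attribute.KeyValue{
+//        K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
+//        K8SPodName(os.Getenv("POD_NAME")),
+//        K8SPodUID(os.Getenv("POD_UID")),
+//    }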
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, as reported by, for example, the
+ // `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, for
+// example, the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
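+
+// A sketch of deriving the OS type from the Go runtime; most runtime.GOOS
+// values line up with the enum values above, with a raw-value fallback for
+// the rest:
+//
+//    var osType attribute.KeyValue
+//    switch runtime.GOOS {
+//    case "windows":
+//        osType = OSTypeWindows
+//    case "linux":
+//        osType = OSTypeLinux
+//    case "darwin":
+//        osType = OSTypeDarwin
+//    case "dragonfly":
+//        osType = OSTypeDragonflyBSD
+//    default:
+//        osType = OSTypeKey.String(runtime.GOOS)
+//    }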
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
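+
+// A sketch of filling these attributes from the standard library (assuming
+// the os and os/user packages; error handling omitted for brevity):
+//
+//    u, _ := user.Current()
+//    attrs := []attribute.KeyValue{
+//        ProcessPID(os.Getpid()),
+//        ProcessParentPID(os.Getppid()),
+//        ProcessCommandArgs(os.Args...),
+//        ProcessOwner(u.Username),
+//    }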
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
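+
+// For a Go process, a plausible sketch uses the runtime package:
+//
+//    attrs := []attribute.KeyValue{
+//        ProcessRuntimeName("go"),
+//        ProcessRuntimeVersion(runtime.Version()),
+//        ProcessRuntimeDescription(runtime.GOOS + "/" + runtime.GOARCH),
+//    }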
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
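+// A minimal usage sketch (editorial addition, not part of the generated
+// code): attaching the service identity attributes above to an SDK resource.
+// It assumes the go.opentelemetry.io/otel/sdk/resource package; the attribute
+// values are taken from the examples above and are illustrative only.
+//
+//	import (
+//		"go.opentelemetry.io/otel/sdk/resource"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+//	)
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"),
+//		semconv.ServiceNamespace("Shop"),
+//		semconv.ServiceVersion("2.0.0"),
+//	)
+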
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
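+// A usage sketch (editorial addition): the telemetry SDK attributes are
+// normally populated by the SDK itself, but they can be constructed
+// explicitly, for example when assembling a resource by hand. Values are
+// taken from the examples above.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.TelemetrySDKName("opentelemetry"),
+//		semconv.TelemetrySDKLanguageGo,
+//		semconv.TelemetrySDKVersion("1.2.3"),
+//	}
+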
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OtelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OtelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OtelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OtelScopeName(val string) attribute.KeyValue {
+ return OtelScopeNameKey.String(val)
+}
+
+// OtelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OtelScopeVersion(val string) attribute.KeyValue {
+ return OtelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OtelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions. It is deprecated; use the
+	// `otel.scope.name` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OtelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions. It is deprecated; use the
+	// `otel.scope.version` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OtelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OtelLibraryName(val string) attribute.KeyValue {
+ return OtelLibraryNameKey.String(val)
+}
+
+// OtelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OtelLibraryVersion(val string) attribute.KeyValue {
+ return OtelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 0000000000..634a1dce07
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must
+// declare a non-empty schema URL in the form
+// https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
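+
+// A usage sketch (editorial addition): SchemaURL is typically passed as the
+// schema URL when constructing a resource, so consumers know which semantic
+// convention version the attributes follow. resource here is assumed to be
+// go.opentelemetry.io/otel/sdk/resource; the service name is illustrative.
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"))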
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 0000000000..21497bb6bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,3364 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
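+// A usage sketch (editorial addition): these attributes are typically
+// attached to an "exception" span event. trace is assumed to be
+// go.opentelemetry.io/otel/trace and debug is runtime/debug; the values are
+// taken from the examples above.
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		semconv.ExceptionType("OSError"),
+//		semconv.ExceptionMessage("Division by zero"),
+//		semconv.ExceptionStacktrace(string(debug.Stack())),
+//	))
+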
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain that identifies the
+	// business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+	// Note: Events across different domains may have the same `event.name`,
+	// yet be unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
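+// A usage sketch (editorial addition): an event recorded with both of its
+// required attributes, the name and the domain. The event name "click" is
+// taken from the examples above.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.EventName("click"),
+//		semconv.EventDomainBrowser,
+//	}
+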
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` request, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
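+// A usage sketch (editorial addition): annotating a span that processes a
+// CloudEvent. span is assumed to be a trace.Span; all values are taken from
+// the examples above.
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudeventsEventSpecVersion("1.0"),
+//		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
+//	)
+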
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable and not
+ // explicitly disabled via instrumentation configuration.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
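+// A usage sketch (editorial addition): starting a database client span
+// carrying the call-level attributes above. tracer is assumed to be a
+// trace.Tracer from go.opentelemetry.io/otel/trace; the statement values are
+// taken from the examples above.
+//
+//	ctx, span := tracer.Start(ctx, "SELECT wuser_table",
+//		trace.WithSpanKind(trace.SpanKindClient),
+//		trace.WithAttributes(
+//			semconv.DBSystemPostgreSQL,
+//			semconv.DBName("customers"),
+//			semconv.DBStatement("SELECT * FROM wuser_table"),
+//			semconv.DBOperation("SELECT"),
+//		))
+//	defer span.End()
+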
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// being connected to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
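+// A usage sketch (editorial addition): Cassandra call-level attributes on a
+// span, combining a consistency-level enum member with the helper functions
+// above. span is assumed to be a trace.Span; values are taken from the
+// examples above.
+//
+//	span.SetAttributes(
+//		semconv.DBSystemCassandra,
+//		semconv.DBCassandraConsistencyLevelLocalQuorum,
+//		semconv.DBCassandraTable("mytable"),
+//		semconv.DBCassandraPageSize(5000),
+//	)
+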
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
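+// A usage sketch (editorial addition): for Redis, the database index is
+// reported instead of the generic `db.name` attribute. span is assumed to be
+// a trace.Span; values are taken from the examples above.
+//
+//	span.SetAttributes(
+//		semconv.DBSystemRedis,
+//		semconv.DBRedisDBIndex(15),
+//		semconv.DBStatement(`SET mykey "WuValue"`),
+//	)
+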
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OtelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OtelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OtelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OtelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
+)
+
+// OtelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OtelStatusDescription(val string) attribute.KeyValue {
+ return OtelStatusDescriptionKey.String(val)
+}
+
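+// A usage sketch (editorial addition): a non-OTLP exporter mapping a span's
+// error status onto these attributes. s is assumed to be a
+// go.opentelemetry.io/otel/sdk/trace ReadOnlySpan whose status code is Error.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.OtelStatusCodeError,
+//		semconv.OtelStatusDescription(s.Status().Description),
+//	}
+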
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+	// trigger that the corresponding incoming span would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSExecutionKey is the attribute Key conforming to the "faas.execution"
+ // semantic conventions. It represents the execution ID of the current
+ // function execution.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSExecution returns an attribute KeyValue conforming to the
+// "faas.execution" semantic conventions. It represents the execution ID of the
+// current function execution.
+func FaaSExecution(val string) attribute.KeyValue {
+ return FaaSExecutionKey.String(val)
+}
+
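+// A usage sketch (editorial addition): an incoming FaaS span records its
+// trigger type and execution ID. span is assumed to be a trace.Span; the
+// execution ID is taken from the example above.
+//
+//	span.SetAttributes(
+//		semconv.FaaSTriggerHTTP,
+//		semconv.FaaSExecution("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	)
+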
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 this corresponds to the bucket name,
+	// and in Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It represents the type
+	// of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+// to the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
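+// A usage sketch (editorial addition): an outgoing span for invoking another
+// function records what is being invoked and where. span is assumed to be a
+// trace.Span; values are taken from the examples above.
+//
+//	span.SetAttributes(
+//		semconv.FaaSInvokedName("my-function"),
+//		semconv.FaaSInvokedProviderAWS,
+//		semconv.FaaSInvokedRegion("eu-central-1"),
+//	)
+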
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions. It represents the transport protocol used.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetAppProtocolNameKey is the attribute Key conforming to the
+ // "net.app.protocol.name" semantic conventions. It represents the
+ // application layer protocol used. The value SHOULD be normalized to
+ // lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetAppProtocolNameKey = attribute.Key("net.app.protocol.name")
+
+ // NetAppProtocolVersionKey is the attribute Key conforming to the
+ // "net.app.protocol.version" semantic conventions. It represents the
+ // version of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.app.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client used has a version of `0.27.2`, but sends HTTP version
+ // `1.1`, this attribute should be set to `1.1`.
+ NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.host.port` and if `net.sock.host.addr` is set.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetAppProtocolName returns an attribute KeyValue conforming to the
+// "net.app.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetAppProtocolName(val string) attribute.KeyValue {
+ return NetAppProtocolNameKey.String(val)
+}
+
+// NetAppProtocolVersion returns an attribute KeyValue conforming to the
+// "net.app.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetAppProtocolVersion(val string) attribute.KeyValue {
+ return NetAppProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number.
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect.
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
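+
+// A minimal sketch of recording the logical and socket-level peer of an
+// outbound TCP connection, assuming a trace.Span named `span` is in scope:
+//
+//	span.SetAttributes(
+//		NetTransportTCP,
+//		NetAppProtocolName("http"),
+//		NetPeerName("example.com"),
+//		NetPeerPort(443),
+//		NetSockPeerAddr("127.0.0.1"),
+//		NetSockPeerPort(16456),
+//	)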
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
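+
+// A minimal sketch of annotating a span with the authenticated end user,
+// assuming a trace.Span named `span` is in scope; the values are
+// illustrative:
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message, write:files"),
+//	)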
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
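+
+// A minimal sketch of recording the thread that started a span, assuming a
+// trace.Span named `span` is in scope:
+//
+//	span.SetAttributes(
+//		ThreadID(42),
+//		ThreadName("main"),
+//	)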
+
+// These attributes allow to report this unit of code and therefore to provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
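+
+// A minimal sketch of recording the code unit that produced a span, assuming
+// a trace.Span named `span` is in scope; the values are illustrative:
+//
+//	span.SetAttributes(
+//		CodeFunction("serveRequest"),
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeLineNumber(42),
+//	)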
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions. It represents the kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be
+ // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+ // assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions. It represents the value of the
+ // [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+ // HTTP/1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue {
+ return HTTPUserAgentKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
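+
+// A minimal sketch of the shared HTTP attributes on a client or server span,
+// assuming a trace.Span named `span` is in scope; the values are
+// illustrative:
+//
+//	span.SetAttributes(
+//		HTTPMethod("GET"),
+//		HTTPStatusCode(200),
+//		HTTPFlavorHTTP11,
+//		HTTPResponseContentLength(3495),
+//	)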
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network
+ // issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
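+
+// A minimal sketch for an HTTP client span, assuming a trace.Span named
+// `span` is in scope; note the URL carries no credentials:
+//
+//	span.SetAttributes(
+//		HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+//		HTTPResendCount(3),
+//	)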
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // a HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: 'http.route' MUST NOT be populated when this is not supported by
+ // the HTTP server framework as the route attribute should have
+ // low-cardinality and the URI path can NOT substitute it.
+ HTTPRouteKey = attribute.Key("http.route")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would
+ // identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in a
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
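+
+// A minimal sketch for an HTTP server span, assuming a trace.Span named
+// `span` is in scope and the framework exposes a matched route; the values
+// are illustrative:
+//
+//	span.SetAttributes(
+//		HTTPScheme("https"),
+//		HTTPTarget("/path/12314/?q=ddds"),
+//		HTTPRoute("/users/:userID?"),
+//		HTTPClientIP("83.164.160.102"),
+//	)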
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
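+
+// A minimal sketch for a DynamoDB request span using the shared attributes
+// above, assuming a trace.Span named `span` is in scope; the values are
+// illustrative:
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBProjection("Title, Price, Color"),
+//		AWSDynamoDBLimit(10),
+//	)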
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
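+
+// A minimal sketch for a DynamoDB.Scan span, assuming a trace.Span named
+// `span` is in scope; the values are illustrative:
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(10),
+//		AWSDynamoDBTotalSegments(100),
+//		AWSDynamoDBCount(10),
+//		AWSDynamoDBScannedCount(50),
+//	)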
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
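+
+// A minimal sketch for a GraphQL execution span, assuming a trace.Span named
+// `span` is in scope and the document has been sanitized:
+//
+//	span.SetAttributes(
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlOperationTypeQuery,
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)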
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
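+
+// For illustration only: per-message attributes might be attached to a
+// processing span or link like this (a sketch; `span` is assumed to be a
+// trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+//		semconv.MessagingMessagePayloadSizeBytes(2738),
+//	)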
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker does not have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationKindKey is the attribute Key conforming to the
+ // "messaging.destination.kind" semantic conventions. It represents the
+ // kind of message destination
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationKindKey = attribute.Key("messaging.destination.kind")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+var (
+ // A message sent to a queue
+ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+ // A message sent to a topic
+ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
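+
+// For illustration only: a producer span for a templated, non-temporary
+// destination might combine these helpers as follows (a sketch, not a
+// normative example):
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.MessagingDestinationName("/customers/42"),
+//		semconv.MessagingDestinationTemplate("/customers/{customerID}"),
+//		semconv.MessagingDestinationKindTopic,
+//		semconv.MessagingDestinationTemporary(false),
+//	}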
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker does not have such notion, the source name SHOULD uniquely
+ // identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceKindKey is the attribute Key conforming to the
+ // "messaging.source.kind" semantic conventions. It represents the kind of
+ // message source
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceKindKey = attribute.Key("messaging.source.kind")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+var (
+ // A message received from a queue
+ MessagingSourceKindQueue = MessagingSourceKindKey.String("queue")
+ // A message received from a topic
+ MessagingSourceKindTopic = MessagingSourceKindKey.String("topic")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
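+
+// For illustration only: a batch-receive span could set the required system
+// and operation attributes together with the batch count (sketch; `span` is
+// assumed to be a trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.MessagingSystem("kafka"),
+//		semconv.MessagingOperationReceive,
+//		semconv.MessagingBatchMessageCount(2),
+//	)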
+
+// Semantic convention for a consumer of messages received from a messaging
+// system
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers,
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
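+
+// For illustration only: a sketch of deriving the consumer ID for Kafka per
+// the rule above (hypothetical helper, not part of this package):
+//
+//	func kafkaConsumerID(group, clientID string) attribute.KeyValue {
+//		if clientID != "" {
+//			return semconv.MessagingConsumerID(group + " - " + clientID)
+//		}
+//		return semconv.MessagingConsumerID(group)
+//	}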
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message key in Kafka; message keys are used for grouping alike messages
+ // to ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message key in Kafka; message keys are used for grouping alike messages to
+// ensure they're processed on the same partition. They differ from
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
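+
+// For illustration only: a Kafka consumer span might combine these helpers
+// like so (sketch; values are placeholders):
+//
+//	span.SetAttributes(
+//		semconv.MessagingKafkaConsumerGroup("my-group"),
+//		semconv.MessagingKafkaClientID("client-5"),
+//		semconv.MessagingKafkaSourcePartition(2),
+//		semconv.MessagingKafkaMessageOffset(42),
+//	)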
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of the message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
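+
+// For illustration only: a RocketMQ producer span for a FIFO message might
+// set the required and conditionally required attributes like this (sketch;
+// values are placeholders):
+//
+//	span.SetAttributes(
+//		semconv.MessagingRocketmqNamespace("myNamespace"),
+//		semconv.MessagingRocketmqClientGroup("myProducerGroup"),
+//		semconv.MessagingRocketmqClientID("myhost@8742@s8083jm"),
+//		semconv.MessagingRocketmqMessageTypeFifo,
+//		semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
+//	)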
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
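+
+// For illustration only: for a span named
+// "myservice.EchoService/exampleMethod", the corresponding attributes would
+// be set as follows (sketch):
+//
+//	span.SetAttributes(
+//		semconv.RPCSystemGRPC,
+//		semconv.RPCService("myservice.EchoService"),
+//		semconv.RPCMethod("exampleMethod"),
+//	)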
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
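+
+// For illustration only: a sketch of recording the gRPC status code from a
+// call result, assuming google.golang.org/grpc/status and its Code helper:
+//
+//	import "google.golang.org/grpc/status"
+//
+//	code := status.Code(err) // codes.Code is an integer type
+//	span.SetAttributes(semconv.RPCGRPCStatusCodeKey.Int(int(code)))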
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since the protocol allows the id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows the id to be int,
+// string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
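+
+// For illustration only: a sketch of the request_id casting rule described
+// above: ints are stringified, `null` becomes the empty string, and
+// notifications omit the attribute entirely (hypothetical helper):
+//
+//	func jsonrpcRequestID(id any, isNotification bool) []attribute.KeyValue {
+//		if isNotification {
+//			return nil // omit entirely for notifications
+//		}
+//		if id == nil {
+//			return []attribute.KeyValue{semconv.RPCJsonrpcRequestID("")}
+//		}
+//		return []attribute.KeyValue{semconv.RPCJsonrpcRequestID(fmt.Sprint(id))}
+//	}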
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
new file mode 100644
index 0000000000..82e1f46b4e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.20.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 0000000000..6685c392b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1198 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
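+
+// For illustration only: a server span for a successful GET might set both
+// attributes like this (sketch; `span` is assumed to be a trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.HTTPMethod("GET"),
+//		semconv.HTTPStatusCode(200),
+//	)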
+
+// HTTP Server spans attributes
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+ // if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the business
+ // context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions. It represents the application
+ // layer protocol used. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions. It represents the version
+ // of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.protocol.version` refers to the version of the protocol used
+ // and might be different from the protocol client's version. If the HTTP
+ // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If defined for the address
+ // family and if different than `net.host.port` and if `net.sock.host.addr`
+ // is set. In other cases, it is still recommended to set this.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the version of
+// the application layer protocol used. See note below.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
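+
+// For illustration only: a client span connecting to a remote peer might
+// record logical and socket-level peer information together (sketch; values
+// are placeholders):
+//
+//	span.SetAttributes(
+//		semconv.NetTransportTCP,
+//		semconv.NetPeerName("example.com"),
+//		semconv.NetPeerPort(443),
+//		semconv.NetSockPeerAddr("127.0.0.1"),
+//		semconv.NetSockPeerPort(16456),
+//	)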
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
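+
+// Illustrative usage sketch (not part of the generated conventions): the
+// helpers and enum values above are typically attached to an active span,
+// where `span` is a hypothetical trace.Span, e.g.:
+//
+//	span.SetAttributes(
+//		semconv.NetHostConnectionTypeWifi,
+//		semconv.NetHostCarrierName("sprint"),
+//		semconv.NetHostCarrierMcc("310"),
+//	)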
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
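+
+// Illustrative usage sketch: assuming an instrumented HTTP client with a
+// hypothetical span and *http.Request/*http.Response pair, the payload sizes
+// can be recorded from the standard ContentLength fields, e.g.:
+//
+//	span.SetAttributes(
+//		semconv.HTTPRequestContentLength(int(req.ContentLength)),
+//		semconv.HTTPResponseContentLength(int(resp.ContentLength)),
+//	)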
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
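+
+// Illustrative usage sketch: on a hypothetical producer span, the per-message
+// attributes above might be recorded as, e.g.:
+//
+//	span.SetAttributes(
+//		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+//		semconv.MessagingMessagePayloadSizeBytes(len(payload)), // payload is hypothetical
+//	)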
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker does not have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
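+
+// Illustrative usage sketch: a high-cardinality destination name can be paired
+// with its low-cardinality template so backends can group and aggregate, e.g.:
+//
+//	span.SetAttributes(
+//		semconv.MessagingDestinationName("/customers/42"),
+//		semconv.MessagingDestinationTemplate("/customers/{customerID}"),
+//	)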
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker does not have such notion, the source name SHOULD uniquely
+ // identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
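+
+// Illustrative usage sketch: for a hypothetical RabbitMQ publish span with a
+// non-empty routing key, e.g.:
+//
+//	span.SetAttributes(semconv.MessagingRabbitmqDestinationRoutingKey("myKey"))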
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
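+
+// Illustrative usage sketch: on a hypothetical Kafka consumer span, the
+// attributes above might be combined as, e.g.:
+//
+//	span.SetAttributes(
+//		semconv.MessagingKafkaConsumerGroup("my-group"),
+//		semconv.MessagingKafkaSourcePartition(2),
+//		semconv.MessagingKafkaMessageOffset(42),
+//	)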
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+ // message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
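+
+// Illustrative usage sketch: the Required RocketMQ attributes above would
+// typically appear together on a hypothetical messaging span, e.g.:
+//
+//	span.SetAttributes(
+//		semconv.MessagingRocketmqNamespace("myNamespace"),
+//		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
+//		semconv.MessagingRocketmqClientID("myhost@8742@s8083jm"),
+//	)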
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
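+
+// Illustrative usage sketch: on a server span, the original User-Agent header
+// can be taken directly from a hypothetical *http.Request, e.g.:
+//
+//	span.SetAttributes(semconv.UserAgentOriginal(req.UserAgent()))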
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
new file mode 100644
index 0000000000..0d1f55a8fe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.20.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
new file mode 100644
index 0000000000..6377639321
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
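+
+// Illustrative usage sketch: since these conventions describe an event, they
+// are typically attached to a span event (event name assumed here) rather than
+// to the span itself, e.g.:
+//
+//	span.AddEvent("feature_flag", trace.WithAttributes(
+//		semconv.FeatureFlagKey("logo-color"),
+//		semconv.FeatureFlagProviderName("Flag Manager"),
+//		semconv.FeatureFlagVariant("red"),
+//	))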
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or
+ // sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
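+
+// Illustrative usage sketch: RPC instrumentation commonly records one
+// "message" span event per message sent or received, e.g.:
+//
+//	span.AddEvent("message", trace.WithAttributes(
+//		semconv.MessageTypeSent,
+//		semconv.MessageID(1),
+//		semconv.MessageUncompressedSize(1024),
+//	))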
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
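+
+// Illustrative usage sketch: when an exception is known to escape because it
+// is still active just before the span ends, the flag can be attached to the
+// error event (err is hypothetical), e.g.:
+//
+//	span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))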
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 0000000000..f40c97825a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
new file mode 100644
index 0000000000..9c1840631b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
new file mode 100644
index 0000000000..3d44dae275
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
@@ -0,0 +1,2060 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
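+
+// Illustrative usage sketch: browser attributes describe a resource, so they
+// would normally be supplied when constructing the SDK resource (assuming this
+// package's SchemaURL constant), e.g.:
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.BrowserPlatform("macOS"),
+//		semconv.BrowserLanguage("en-US"),
+//	)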
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
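+
+// Illustrative usage sketch: cloud attributes are likewise resource-level; a
+// hypothetical GCP Cloud Run service might be described as, e.g.:
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.CloudProviderGCP,
+//		semconv.CloudPlatformGCPCloudRun,
+//		semconv.CloudRegion("us-central1"),
+//	)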
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
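+
+// Illustrative sketch (not upstream code): inside an ECS task these values
+// can be read from the task metadata endpoint v4, which ECS exposes through
+// the ECS_CONTAINER_METADATA_URI_V4 environment variable. The JSON field
+// names below follow the documented v4 task metadata response; whether
+// Cluster holds a full ARN or a bare name varies by launch type, and error
+// handling is minimal:
+//
+//	import (
+//		"encoding/json"
+//		"net/http"
+//		"os"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//	)
+//
+//	func ecsTaskAttributes() ([]attribute.KeyValue, error) {
+//		resp, err := http.Get(os.Getenv("ECS_CONTAINER_METADATA_URI_V4") + "/task")
+//		if err != nil {
+//			return nil, err
+//		}
+//		defer resp.Body.Close()
+//		var md struct {
+//			Cluster, TaskARN, Family, Revision string
+//		}
+//		if err := json.NewDecoder(resp.Body).Decode(&md); err != nil {
+//			return nil, err
+//		}
+//		return []attribute.KeyValue{
+//			AWSECSClusterARN(md.Cluster),
+//			AWSECSTaskARN(md.TaskARN),
+//			AWSECSTaskFamily(md.Family),
+//			AWSECSTaskRevision(md.Revision),
+//		}, nil
+//	}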
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
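+
+// Usage note (illustrative): the four constructors above are variadic and
+// produce a single string[] attribute, which covers the sidecar case from
+// the note on "aws.log.group.names":
+//
+//	kv := AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service")
+//	// kv.Value.AsStringSlice() yields both group names in order.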
+
+// Heroku dyno metadata
+const (
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+)
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., the function app
+	// name followed by a forward slash followed by the function name (this
+	// form can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+	// string, which will potentially be reused for other invocations of the
+	// same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations of the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
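+
+// Illustrative sketch (an assumption, not upstream code): on AWS Lambda the
+// note on "faas.max_memory" asks for bytes, while the
+// AWS_LAMBDA_FUNCTION_MEMORY_SIZE environment variable reports MiB, so the
+// value must be multiplied by 1,048,576 (1024 * 1024):
+//
+//	import (
+//		"os"
+//		"strconv"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//	)
+//
+//	func lambdaMaxMemory() (attribute.KeyValue, error) {
+//		mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
+//		if err != nil {
+//			return attribute.KeyValue{}, err
+//		}
+//		return FaaSMaxMemory(mib * 1024 * 1024), nil // e.g. 128 MiB -> 134217728
+//	}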
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
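+
+// Illustrative sketch (not upstream code): host.name and host.arch can be
+// derived from the standard library; mapping runtime.GOARCH onto the
+// HostArch* enum members, with a raw-string fallback for unmapped values, is
+// an assumption:
+//
+//	import (
+//		"os"
+//		"runtime"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//	)
+//
+//	func hostAttributes() []attribute.KeyValue {
+//		arch := HostArchKey.String(runtime.GOARCH) // fallback for unmapped values
+//		switch runtime.GOARCH {
+//		case "amd64":
+//			arch = HostArchAMD64
+//		case "arm64":
+//			arch = HostArchARM64
+//		case "386":
+//			arch = HostArchX86
+//		}
+//		attrs := []attribute.KeyValue{arch}
+//		if name, err := os.Hostname(); err == nil {
+//			attrs = append(attrs, HostName(name))
+//		}
+//		return attrs
+//	}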
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
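+
+// Illustrative sketch (an assumption, not part of the conventions): pod
+// metadata is commonly injected into the container through the Kubernetes
+// Downward API (fieldRef: metadata.namespace / metadata.name / metadata.uid);
+// the POD_* environment variable names below are hypothetical:
+//
+//	import (
+//		"os"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//	)
+//
+//	func podAttributes() []attribute.KeyValue {
+//		return []attribute.KeyValue{
+//			K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
+//			K8SPodName(os.Getenv("POD_NAME")),
+//			K8SPodUID(os.Getenv("POD_UID")),
+//		}
+//	}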
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod.
+	// The container runtime usually uses a different, globally unique name
+	// (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+	// be parsed) OS version information, e.g. as reported by the `ver` or
+	// `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, e.g. as reported by the
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
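+
+// Illustrative sketch (not upstream code): runtime.GOOS maps naturally onto
+// the OSType* enum members; falling back to the raw GOOS string for values
+// outside the enum is an assumption:
+//
+//	import "runtime"
+//
+//	func osType() attribute.KeyValue {
+//		switch runtime.GOOS {
+//		case "windows":
+//			return OSTypeWindows
+//		case "linux":
+//			return OSTypeLinux
+//		case "darwin":
+//			return OSTypeDarwin
+//		case "freebsd":
+//			return OSTypeFreeBSD
+//		case "netbsd":
+//			return OSTypeNetBSD
+//		case "openbsd":
+//			return OSTypeOpenBSD
+//		case "dragonfly":
+//			return OSTypeDragonflyBSD
+//		case "solaris", "illumos":
+//			return OSTypeSolaris
+//		case "aix":
+//			return OSTypeAIX
+//		case "zos":
+//			return OSTypeZOS
+//		default:
+//			return OSTypeKey.String(runtime.GOOS)
+//		}
+//	}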
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
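+
+// Illustrative sketch (not upstream code): most of these attributes can be
+// filled from the standard library for the current process:
+//
+//	import (
+//		"os"
+//		"os/user"
+//		"path/filepath"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//	)
+//
+//	func processAttributes() []attribute.KeyValue {
+//		attrs := []attribute.KeyValue{
+//			ProcessPID(os.Getpid()),
+//			ProcessParentPID(os.Getppid()),
+//			ProcessCommandArgs(os.Args...),
+//		}
+//		if exe, err := os.Executable(); err == nil {
+//			attrs = append(attrs,
+//				ProcessExecutablePath(exe),
+//				ProcessExecutableName(filepath.Base(exe)),
+//			)
+//		}
+//		if u, err := user.Current(); err == nil {
+//			attrs = append(attrs, ProcessOwner(u.Username))
+//		}
+//		return attrs
+//	}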
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
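+
+// Illustrative sketch (not upstream code): for a Go process the runtime
+// attributes can be derived from the runtime package; using runtime.Compiler
+// ("gc" for the standard toolchain) as the runtime name is an assumption:
+//
+//	import (
+//		"fmt"
+//		"runtime"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//	)
+//
+//	func runtimeAttributes() []attribute.KeyValue {
+//		return []attribute.KeyValue{
+//			ProcessRuntimeName(runtime.Compiler),
+//			ProcessRuntimeVersion(runtime.Version()), // e.g. "go1.20"
+//			ProcessRuntimeDescription(fmt.Sprintf("go version %s %s/%s",
+//				runtime.Version(), runtime.GOOS, runtime.GOARCH)),
+//		}
+//	}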
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
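+
+// Illustrative sketch (not upstream code): the fallback rules from the note
+// on "service.name" can be implemented directly:
+//
+//	import (
+//		"os"
+//		"path/filepath"
+//
+//		"go.opentelemetry.io/otel/attribute"
+//	)
+//
+//	func serviceName(configured string) attribute.KeyValue {
+//		if configured != "" {
+//			return ServiceName(configured)
+//		}
+//		if exe, err := os.Executable(); err == nil {
+//			return ServiceName("unknown_service:" + filepath.Base(exe))
+//		}
+//		return ServiceName("unknown_service")
+//	}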
+
+// A service instance.
+const (
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
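+
+// Example (illustrative sketch, not part of the generated conventions):
+// together with service.name, these attributes identify one service
+// instance; the (service.namespace, service.name, service.instance.id)
+// triplet must be globally unique. Values follow the spec examples.
+func exampleServiceInstanceAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ServiceName("shoppingcart"),
+  ServiceNamespace("Shop"),
+  // A persistent UUID is preferred over an ephemeral id.
+  ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+  ServiceVersion("2.0.0"),
+ }
+}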
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
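+
+// Example (illustrative sketch, not part of the generated conventions): an
+// SDK written in Go describes itself with the language enum plus the name
+// and version helpers; all three attributes are Required.
+func exampleTelemetrySDKAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  TelemetrySDKName("opentelemetry"),
+  TelemetrySDKLanguageGo,
+  TelemetrySDKVersion("1.2.3"),
+ }
+}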
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
new file mode 100644
index 0000000000..95d0210e38
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
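+
+// Illustrative usage (a sketch, assuming the
+// go.opentelemetry.io/otel/sdk/resource package): SchemaURL is passed along
+// with the attributes so consumers can interpret them against the correct
+// version of the conventions, e.g.
+//
+// res := resource.NewWithAttributes(SchemaURL, semconv.ServiceName("shoppingcart"))
+//
+// where semconv refers to this package.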
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
new file mode 100644
index 0000000000..90b1b0452c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -0,0 +1,2599 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
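+
+// Example (illustrative sketch, not part of the generated conventions): the
+// three exception.* attributes are typically recorded together on a span
+// event named "exception".
+func exampleExceptionAttrs(typeName, message, stack string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ExceptionType(typeName),   // e.g. "*os.PathError"
+  ExceptionMessage(message), // e.g. "file does not exist"
+  ExceptionStacktrace(stack),
+ }
+}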
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` request, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
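+
+// Example (illustrative sketch, not part of the generated conventions):
+// annotating a span that processes a CloudEvent, using the spec examples.
+// event_id and event_source are Required; the remaining attributes are
+// Optional.
+func exampleCloudEventAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+  CloudeventsEventSource("https://github.com/cloudevents"),
+  CloudeventsEventSpecVersion("1.0"),
+  CloudeventsEventType("com.github.pull_request.opened"),
+  CloudeventsEventSubject("mynewfile.jpg"),
+ }
+}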
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
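+
+// Example (illustrative sketch, not part of the generated conventions): a
+// typical SQL client span combines the db.system enum with the call-level
+// helpers above; db.statement should only be recorded after sanitization.
+func exampleSQLClientAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DBSystemPostgreSQL,
+  DBName("customers"),
+  DBUser("readonly_user"),
+  DBOperation("SELECT"),
+  DBStatement("SELECT * FROM wuser_table"),
+ }
+}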
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
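+
+// Example (illustrative sketch, not part of the generated conventions): a
+// Cassandra query span layers the db.cassandra.* attributes on top of the
+// generic db.* ones.
+func exampleCassandraAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DBSystemCassandra,
+  DBCassandraConsistencyLevelLocalQuorum,
+  DBCassandraTable("mytable"),
+  DBCassandraPageSize(5000),
+  DBCassandraIdempotence(true),
+  DBCassandraCoordinatorDC("us-west-2"),
+ }
+}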
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
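+
+// Example (illustrative sketch, not part of the generated conventions): for
+// Redis, db.redis.database_index replaces the generic db.name and is only
+// required when it differs from the default database (0).
+func exampleRedisAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DBSystemRedis,
+  DBRedisDBIndex(15),
+  DBStatement(`SET mykey "WuValue"`),
+ }
+}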
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Call-level attributes for Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // cosmosDB Operation Type.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (when performing one of the
+ // operations in this list)
+ // Stability: stable
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (if not `direct`; or pick
+ // `gateway` as the default)
+ // Stability: stable
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if available)
+ // Stability: stable
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (if response was received)
+ // Stability: stable
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (when response was received and
+ // contained sub-code.)
+ // Stability: stable
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
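+
+// Example (illustrative sketch, not part of the generated conventions): a
+// successful Cosmos DB point read, using the spec examples for the values.
+func exampleCosmosDBAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DBSystemCosmosDB,
+  DBCosmosDBClientID("3ba4827d-4422-483f-b59f-85b74211c11d"),
+  DBCosmosDBOperationTypeRead,
+  DBCosmosDBConnectionModeGateway,
+  DBCosmosDBContainer("anystring"),
+  DBCosmosDBStatusCode(200),
+  DBCosmosDBRequestCharge(46.18),
+ }
+}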
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
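+
+// Example (illustrative sketch, not part of the generated conventions):
+// otel.status_code is only set for OK or ERROR, never for UNSET, and the
+// description normally accompanies ERROR only.
+func exampleStatusAttrs(failed bool, desc string) []attribute.KeyValue {
+ if !failed {
+  return []attribute.KeyValue{OTelStatusCodeOk}
+ }
+ return []attribute.KeyValue{
+  OTelStatusCodeError,
+  OTelStatusDescription(desc), // e.g. "resource not found"
+ }
+}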
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that the corresponding incoming invocation would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
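+
+// Example (illustrative sketch, not part of the generated conventions): an
+// incoming HTTP-triggered invocation must carry faas.trigger; the invocation
+// id is taken from the FaaS runtime.
+func exampleFaaSHTTPAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FaaSTriggerHTTP,
+  FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+ }
+}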
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 it is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// it is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
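+
+// Example (illustrative sketch, not part of the generated conventions): a
+// datasource-triggered invocation reacting to an object being created in an
+// S3 bucket.
+func exampleFaaSDocumentAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FaaSTriggerDatasource,
+  FaaSDocumentCollection("myBucketName"),
+  FaaSDocumentOperationInsert,
+  FaaSDocumentName("myFile.txt"),
+  FaaSDocumentTime("2020-01-23T13:47:06Z"),
+ }
+}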
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
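+
+// An instrumentation for an outgoing invocation can record the three
+// invoked_* attributes together (a minimal sketch; `span` is assumed to be a
+// trace.Span, and the region is included because AWS endpoints require it,
+// per the requirement level above):
+//
+//	span.SetAttributes(
+//		FaaSInvokedName("my-function"),
+//		FaaSInvokedProviderAWS,
+//		FaaSInvokedRegion("eu-central-1"),
+//	)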
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting the unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
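+
+// A tracer can populate these attributes from the Go runtime (a minimal
+// sketch; `span` is assumed to be a trace.Span, and the strings package is
+// used to split the fully qualified function name into namespace and
+// function parts):
+//
+//	if pc, file, line, ok := runtime.Caller(0); ok {
+//		full := runtime.FuncForPC(pc).Name() // e.g. "example.com/pkg.(*T).Method"
+//		if i := strings.LastIndex(full, "."); i >= 0 {
+//			span.SetAttributes(CodeNamespace(full[:i]), CodeFunction(full[i+1:]))
+//		}
+//		span.SetAttributes(CodeFilepath(file), CodeLineNumber(line))
+//	}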
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Service Unavailable,
+ // network issues, or any other reason).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of the request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
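+
+// Because `http.url` MUST NOT carry credentials, a client can scrub the
+// userinfo component before recording the attribute (a minimal sketch using
+// the standard net/url package; `span` and `rawURL` are assumed to be in
+// scope):
+//
+//	if u, err := url.Parse(rawURL); err == nil {
+//		u.User = nil // drops any "username:password@" userinfo
+//		span.SetAttributes(HTTPURL(u.String()))
+//	}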
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/users/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
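+
+// Since these attributes carry JSON-serialized request items, a tracer can
+// marshal each index definition before recording it (a minimal sketch; `gsi`
+// is a hypothetical request struct serializable with encoding/json, and
+// `span` is assumed to be a trace.Span):
+//
+//	if b, err := json.Marshal(gsi); err == nil {
+//		span.SetAttributes(AWSDynamoDBGlobalSecondaryIndexes(string(b)))
+//	}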
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
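+
+// The copy-source value combines bucket and key with a slash, matching the
+// `--copy-source` form described above (a minimal sketch; `bucket`, `key`,
+// and `span` are hypothetical local variables):
+//
+//	span.SetAttributes(AWSS3CopySource(bucket + "/" + key))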
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
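+
+// A GraphQL server might record all three attributes when executing a query
+// (a minimal sketch; `span` is assumed to be a trace.Span, and
+// `sanitizedQuery` is a hypothetical document that has already been scrubbed
+// of sensitive information, per the note on `graphql.document`):
+//
+//	span.SetAttributes(
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlOperationTypeQuery,
+//		GraphqlDocument(sanitizedQuery),
+//	)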
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
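+
+// Instrumentation of a batch receive can combine these attributes (a minimal
+// sketch; `span` is assumed to be a trace.Span and `msgs` a hypothetical
+// slice of received messages):
+//
+//	span.SetAttributes(
+//		MessagingSystem("kafka"),
+//		MessagingOperationReceive,
+//		MessagingBatchMessageCount(len(msgs)),
+//	)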
+
+// Semantic convention for a consumer of messages received from a messaging
+// system.
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
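+
+// For Kafka, the identifier is derived from the consumer group and client ID
+// as described above (a minimal sketch; `group`, `clientID`, and `span` are
+// hypothetical local variables):
+//
+//	id := group
+//	if clientID != "" {
+//		id = group + " - " + clientID
+//	}
+//	span.SetAttributes(MessagingConsumerID(id))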
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called; it must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
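+
+// A minimal sketch of recording the gRPC status on a span, assuming the
+// google.golang.org/grpc/status package and an active `span`:
+//
+//    if s, ok := status.FromError(err); ok {
+//        // codes.Code is numeric, so it maps directly onto the Int attribute.
+//        span.SetAttributes(semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())))
+//    }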
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in the `jsonrpc` property of the request/response. Since
+ // JSON-RPC 1.0 does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of a request or response. Since the protocol allows the id to
+ // be an int, a string, `null`, or missing (for notifications), the value
+ // is expected to be cast to a string for simplicity. Use an empty string
+ // in case of a `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in the `jsonrpc` property of the request/response. Since
+// JSON-RPC 1.0 does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of a request or response. Since the protocol allows the id to be
+// an int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string in case
+// of a `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
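+
+// A minimal sketch of annotating a JSON-RPC span, assuming an active `span`
+// and a hypothetical `rpcErr` holding the decoded error response:
+//
+//    span.SetAttributes(
+//        semconv.RPCJsonrpcVersion("2.0"),
+//        semconv.RPCJsonrpcRequestID("10"),
+//    )
+//    if rpcErr != nil {
+//        span.SetAttributes(
+//            semconv.RPCJsonrpcErrorCode(-32700),
+//            semconv.RPCJsonrpcErrorMessage("Parse error"),
+//        )
+//    }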
+
+// Tech-specific attributes for Connect RPC.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If response is not successful
+ // and if error code available.)
+ // Stability: stable
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md
new file mode 100644
index 0000000000..bc60aa6039
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.21.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.21.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.21.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
new file mode 100644
index 0000000000..a9a15a1dab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go
@@ -0,0 +1,1866 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API does not expose a
+// clear notion of client and server). This also covers UDP network
+// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3)
+// and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - unix domain
+ // socket name, IPv4 or IPv6 address.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/tmp/my.sock', '10.1.2.80'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries (e.g. proxies) if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries (e.g. proxies) if it's available.
+ ClientPortKey = attribute.Key("client.port")
+
+ // ClientSocketAddressKey is the attribute Key conforming to the
+ // "client.socket.address" semantic conventions. It represents the
+ // immediate client peer address - unix domain socket name, IPv4 or IPv6
+ // address.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If different than `client.address`.)
+ // Stability: stable
+ // Examples: '/tmp/my.sock', '127.0.0.1'
+ ClientSocketAddressKey = attribute.Key("client.socket.address")
+
+ // ClientSocketPortKey is the attribute Key conforming to the
+ // "client.socket.port" semantic conventions. It represents the immediate
+ // client peer port number
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If different than `client.port`.)
+ // Stability: stable
+ // Examples: 35555
+ ClientSocketPortKey = attribute.Key("client.socket.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// unix domain socket name, IPv4 or IPv6 address.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
+// ClientSocketAddress returns an attribute KeyValue conforming to the
+// "client.socket.address" semantic conventions. It represents the immediate
+// client peer address - unix domain socket name, IPv4 or IPv6 address.
+func ClientSocketAddress(val string) attribute.KeyValue {
+ return ClientSocketAddressKey.String(val)
+}
+
+// ClientSocketPort returns an attribute KeyValue conforming to the
+// "client.socket.port" semantic conventions. It represents the immediate
+// client peer port number
+func ClientSocketPort(val int) attribute.KeyValue {
+ return ClientSocketPortKey.Int(val)
+}
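+
+// A minimal sketch of describing the client side of a connection on a server
+// span, assuming an active `span` and values read from the accepted socket:
+//
+//    span.SetAttributes(
+//        semconv.ClientAddress("10.1.2.80"),
+//        semconv.ClientPort(65123),
+//    )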
+
+// Describes deprecated HTTP attributes.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It is deprecated; use `http.request.method`
+ // instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the deprecated,
+ // use `http.response.status_code` instead.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It is deprecated; use `url.scheme` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It is deprecated; use `url.full` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It is deprecated; use `url.path` and `url.query`
+ // instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/search?q=OpenTelemetry#SemConv'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // deprecated, use `http.request.body.size` instead.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // deprecated, use `http.response.body.size` instead.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It is deprecated; use `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the deprecated, use
+// `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It is deprecated; use `url.scheme` instead.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It is deprecated; use `url.full` instead.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It is deprecated; use `url.path` and `url.query`
+// instead.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the
+// deprecated, use `http.request.body.size` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the
+// deprecated, use `http.response.body.size` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
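+
+// A minimal sketch of migrating off these deprecated keys, assuming an active
+// `span`; both forms record the same information:
+//
+//    // deprecated form
+//    span.SetAttributes(semconv.HTTPMethod("GET"), semconv.HTTPStatusCode(200))
+//    // replacement form
+//    span.SetAttributes(semconv.HTTPRequestMethodGet, semconv.HTTPResponseStatusCode(200))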
+
+// These attributes may be used for any network related operation.
+const (
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the deprecated,
+ // use `server.socket.domain` on client spans.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the deprecated,
+ // use `server.socket.address` on client spans and `client.socket.address`
+ // on server spans.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '192.168.0.1'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the deprecated,
+ // use `server.socket.port` on client spans and `client.socket.port` on
+ // server spans.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 65531
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It is deprecated; use `server.address` on client
+ // spans and `client.address` on server spans.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It is deprecated; use `server.port` on client
+ // spans and `client.port` on server spans.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It is deprecated; use `server.address`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It is deprecated; use `server.port`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the deprecated,
+ // use `server.socket.address`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the deprecated,
+ // use `server.socket.port`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It is deprecated; use `network.transport`.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions. It represents the deprecated,
+ // use `network.protocol.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions. It represents the
+ // deprecated, use `network.protocol.version`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '3.1.1'
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the deprecated,
+ // use `network.transport` and `network.type`.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the deprecated, use
+// `server.socket.domain` on client spans.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use
+// `server.socket.address` on client spans and `client.socket.address` on
+// server spans.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the deprecated, use
+// `server.socket.port` on client spans and `client.socket.port` on server
+// spans.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the deprecated, use
+// `server.address` on client spans and `client.address` on server spans.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the deprecated, use
+// `server.port` on client spans and `client.port` on server spans.
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the deprecated, use
+// `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the deprecated, use
+// `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the deprecated, use
+// `server.socket.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the deprecated, use
+// `server.socket.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It represents the deprecated, use
+// `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the deprecated,
+// use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+ // DestinationDomainKey is the attribute Key conforming to the
+ // "destination.domain" semantic conventions. It represents the domain name
+ // of the destination system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'foo.example.com'
+ // Note: This value may be a host name, a fully qualified domain name, or
+ // another host naming format.
+ DestinationDomainKey = attribute.Key("destination.domain")
+
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the peer
+ // address, for example IP address or UNIX socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.5.3.2'
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the peer port
+ // number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationDomain returns an attribute KeyValue conforming to the
+// "destination.domain" semantic conventions. It represents the domain name of
+// the destination system.
+func DestinationDomain(val string) attribute.KeyValue {
+ return DestinationDomainKey.String(val)
+}
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the peer address,
+// for example IP address or UNIX socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the peer port number
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Describes HTTP attributes.
+const (
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the hTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER` and, except if reporting
+ // a metric, MUST
+ // set the exact method received in the request line as value of the
+ // `http.request.method_original` attribute.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and the `http.request.method`
+ // attribute value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case-insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
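+
+// A minimal sketch of the `_OTHER` rule described above, assuming an
+// *http.Request `req` and a hypothetical `isKnownMethod` helper that checks
+// the configured known-method list:
+//
+//    if isKnownMethod(req.Method) {
+//        span.SetAttributes(semconv.HTTPRequestMethodKey.String(req.Method))
+//    } else {
+//        span.SetAttributes(
+//            semconv.HTTPRequestMethodOther,
+//            semconv.HTTPRequestMethodOriginal(req.Method),
+//        )
+//    }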
+
+// HTTP Server attributes
+const (
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
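+
+// A minimal sketch for a server framework exposing the matched route
+// template, assuming a hypothetical `route` string such as "/users/:userID?";
+// the attribute is omitted when no route matched:
+//
+//    if route != "" {
+//        span.SetAttributes(semconv.HTTPRoute(route))
+//    }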
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have the same `event.name`,
+ // yet be unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
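+
+// A minimal sketch of recording these attributes on a span event, assuming
+// the go.opentelemetry.io/otel/trace API:
+//
+//    span.AddEvent("click", trace.WithAttributes(
+//        semconv.EventName("click"),
+//        semconv.EventDomainBrowser,
+//    ))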
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Logs from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+ // TypeKey is the attribute Key conforming to the "type" semantic
+ // conventions. It represents the type of memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ TypeKey = attribute.Key("type")
+
+ // PoolKey is the attribute Key conforming to the "pool" semantic
+ // conventions. It represents the name of the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ PoolKey = attribute.Key("pool")
+)
+
+var (
+ // Heap memory
+ TypeHeap = TypeKey.String("heap")
+ // Non-heap memory
+ TypeNonHeap = TypeKey.String("non_heap")
+)
+
+// Pool returns an attribute KeyValue conforming to the "pool" semantic
+// conventions. It represents the name of the memory pool.
+func Pool(val string) attribute.KeyValue {
+ return PoolKey.String(val)
+}
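+
+// A minimal sketch of tagging a JVM memory measurement, assuming a
+// hypothetical `memUsage` histogram created with the
+// go.opentelemetry.io/otel/metric API:
+//
+//    memUsage.Record(ctx, usedBytes, metric.WithAttributes(
+//        semconv.TypeHeap,
+//        semconv.Pool("G1 Old Gen"),
+//    ))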
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API does not expose a
+// clear notion of client and server). This also covers UDP network
+// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3)
+// and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the logical server hostname, matches
+ // server FQDN if available, and IP or socket address if FQDN is not known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the logical server port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ ServerPortKey = attribute.Key("server.port")
+
+ // ServerSocketDomainKey is the attribute Key conforming to the
+ // "server.socket.domain" semantic conventions. It represents the domain
+ // name of an immediate peer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If different than `server.address`.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ // Note: Typically observed from the client side, and represents a proxy or
+ // other intermediary domain name.
+ ServerSocketDomainKey = attribute.Key("server.socket.domain")
+
+ // ServerSocketAddressKey is the attribute Key conforming to the
+ // "server.socket.address" semantic conventions. It represents the physical
+ // server IP address or Unix socket address. If set from the client, should
+ // simply use the socket's peer address, and not attempt to find any actual
+ // server IP (i.e., if set from client, this may represent some proxy
+ // server instead of the logical server).
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If different than `server.address`.)
+ // Stability: stable
+ // Examples: '10.5.3.2'
+ ServerSocketAddressKey = attribute.Key("server.socket.address")
+
+ // ServerSocketPortKey is the attribute Key conforming to the
+ // "server.socket.port" semantic conventions. It represents the physical
+ // server port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If different than `server.port`.)
+ // Stability: stable
+ // Examples: 16456
+ ServerSocketPortKey = attribute.Key("server.socket.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the logical server
+// hostname, matches server FQDN if available, and IP or socket address if FQDN
+// is not known.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the logical server port number
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
+// ServerSocketDomain returns an attribute KeyValue conforming to the
+// "server.socket.domain" semantic conventions. It represents the domain name
+// of an immediate peer.
+func ServerSocketDomain(val string) attribute.KeyValue {
+ return ServerSocketDomainKey.String(val)
+}
+
+// ServerSocketAddress returns an attribute KeyValue conforming to the
+// "server.socket.address" semantic conventions. It represents the physical
+// server IP address or Unix socket address. If set from the client, should
+// simply use the socket's peer address, and not attempt to find any actual
+// server IP (i.e., if set from client, this may represent some proxy server
+// instead of the logical server).
+func ServerSocketAddress(val string) attribute.KeyValue {
+ return ServerSocketAddressKey.String(val)
+}
+
+// ServerSocketPort returns an attribute KeyValue conforming to the
+// "server.socket.port" semantic conventions. It represents the physical server
+// port.
+func ServerSocketPort(val int) attribute.KeyValue {
+ return ServerSocketPortKey.Int(val)
+}
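+
+// A minimal sketch of recording both the logical server and the immediate
+// peer on a client span, assuming the connection goes through a proxy:
+//
+//    span.SetAttributes(
+//        semconv.ServerAddress("example.com"),
+//        semconv.ServerPort(443),
+//        // the socket peer differs from server.address, so record it too
+//        semconv.ServerSocketAddress("10.5.3.2"),
+//    )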
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+ // SourceDomainKey is the attribute Key conforming to the "source.domain"
+ // semantic conventions. It represents the domain name of the source
+ // system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'foo.example.com'
+ // Note: This value may be a host name, a fully qualified domain name, or
+ // another host naming format.
+ SourceDomainKey = attribute.Key("source.domain")
+
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address, for example IP
+ // address or Unix socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.5.3.2'
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceDomain returns an attribute KeyValue conforming to the
+// "source.domain" semantic conventions. It represents the domain name of the
+// source system.
+func SourceDomain(val string) attribute.KeyValue {
+ return SourceDomainKey.String(val)
+}
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address, for
+// example IP address or Unix socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // Transport Layer](https://osi-model.com/transport-layer/) or
+ // [Inter-process Communication
+ // method](https://en.wikipedia.org/wiki/Inter-process_communication). The
+ // value SHOULD be normalized to lowercase.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI Network
+ // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The
+ // value SHOULD be normalized to lowercase.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ NetworkTypeKey = attribute.Key("network.type")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // Application Layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // version of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `network.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client used has a version of `0.27.2`, but sends HTTP version
+ // `1.1`, this attribute should be set to `1.1`.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe. See note below
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// Application Layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent. The value SHOULD be normalized to lowercase.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
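+
+// A minimal sketch for an HTTP/1.1 exchange over TCP/IPv4, assuming an active
+// `span`; all values are lowercase as the notes above require:
+//
+//    span.SetAttributes(
+//        semconv.NetworkTransportTCP,
+//        semconv.NetworkTypeIpv4,
+//        semconv.NetworkProtocolName("http"),
+//        semconv.NetworkProtocolVersion("1.1"),
+//    )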
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's different
+ // than `http.request.method`.)
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+)
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
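+
+// A minimal usage sketch (illustrative, not part of the generated API): the
+// constructors above return plain attribute.KeyValue values, so a typical
+// caller passes them to trace.Span.SetAttributes (trace being
+// go.opentelemetry.io/otel/trace). The span parameter and byte counts are
+// assumptions for illustration only.
+//
+//    func recordBodySizes(span trace.Span, reqBytes, respBytes int) {
+//        span.SetAttributes(
+//            HTTPRequestBodySize(reqBytes),   // compressed size if transport encoding is used
+//            HTTPResponseBodySize(respBytes), // likewise for the response
+//        )
+//    }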
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
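+
+// A usage sketch (illustrative, not generated code) stamping per-message
+// attributes on a producer span. Per the note above, the uncompressed-size
+// helper is also the right choice when the compression state is unknown;
+// span, msgID, and payload are assumptions.
+//
+//    span.SetAttributes(
+//        MessagingMessageID(msgID),
+//        MessagingMessagePayloadSizeBytes(len(payload)),
+//    )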
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic,
+ // or other entity within the broker. If the broker does not have such a
+ // notion, the destination name SHOULD uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
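+
+// A sketch of the name/template pairing described above (values are
+// illustrative): the high-cardinality concrete name and its low-cardinality
+// template can be recorded together, so backends can aggregate on the latter.
+//
+//    span.SetAttributes(
+//        MessagingDestinationName("/customers/42"),
+//        MessagingDestinationTemplate("/customers/{customerID}"),
+//    )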
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
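+
+// A sketch honoring the conditional requirement levels above (msg and span
+// are assumptions standing in for a Kafka client message and an active span;
+// only the attribute constructors are real):
+//
+//    attrs := []attribute.KeyValue{
+//        MessagingKafkaDestinationPartition(int(msg.Partition)),
+//        MessagingKafkaMessageOffset(int(msg.Offset)),
+//    }
+//    if msg.Key != nil { // the key MUST NOT be set when it is null
+//        attrs = append(attrs, MessagingKafkaMessageKey(string(msg.Key)))
+//    }
+//    if msg.Value == nil { // a nil value marks a tombstone; set only when true
+//        attrs = append(attrs, MessagingKafkaMessageTombstone(true))
+//    }
+//    span.SetAttributes(attrs...)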
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the it is essential for FIFO message. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the it is essential for FIFO message. Messages that belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
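+
+// A sketch combining the required RocketMQ attributes with one of the enum
+// values above (values are illustrative; the enum members are used directly
+// since no constructor is generated for Enum-typed attributes):
+//
+//    span.SetAttributes(
+//        MessagingRocketmqNamespace("myNamespace"),
+//        MessagingRocketmqClientGroup("myConsumerGroup"),
+//        MessagingRocketmqMessageTypeFifo,
+//        MessagingRocketmqMessageGroup("myMessageGroup"),
+//    )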
+
+// Attributes describing URL.
+const (
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it should be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case username and
+ // password should be redacted and attribute's value should be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed) and SHOULD NOT be validated or modified except for
+ // sanitizing purposes.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: When missing, the value is assumed to be `/`
+ URLPathKey = attribute.Key("url.path")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in query string SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+)
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
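+
+// A redaction sketch for the `url.full` rule above (the helper name is an
+// assumption for illustration; url.Parse and url.UserPassword are from the
+// standard library's net/url):
+//
+//    func fullURLAttr(raw string) attribute.KeyValue {
+//        if u, err := url.Parse(raw); err == nil && u.User != nil {
+//            // Replace any userinfo so credentials never reach the attribute.
+//            u.User = url.UserPassword("REDACTED", "REDACTED")
+//            raw = u.String()
+//        }
+//        return URLFull(raw)
+//    }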
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
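+
+// A usage sketch (req is assumed to be an *http.Request from net/http, span
+// an active trace.Span):
+//
+//    span.SetAttributes(UserAgentOriginal(req.UserAgent()))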
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
new file mode 100644
index 0000000000..461331a555
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.21.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
new file mode 100644
index 0000000000..c09d9317e2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means for referring to a value without including the value itself. This
+ // can provide additional context for understanding the meaning behind a
+ // value. For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
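+
+// A sketch of emitting a flag evaluation as a span event (span is an
+// assumption; the "feature_flag" event name follows these conventions, and
+// trace.WithAttributes is from go.opentelemetry.io/otel/trace):
+//
+//    span.AddEvent("feature_flag", trace.WithAttributes(
+//        FeatureFlagKey("logo-color"),
+//        FeatureFlagProviderName("Flag Manager"),
+//        FeatureFlagVariant("red"),
+//    ))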
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents an identifier that MUST be calculated
+ // as two different counters starting from `1`, one for sent messages and
+ // one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents an identifier that MUST be calculated
+// as two different counters starting from `1`, one for sent messages and one
+// for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
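+
+// A sketch of the two-counter rule for message.id (sentCount is an assumed
+// per-span counter for the sent direction; a separate counter would track
+// received messages):
+//
+//    sentCount++
+//    span.AddEvent("message", trace.WithAttributes(
+//        MessageTypeSent,
+//        MessageID(sentCount),
+//    ))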
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span, if that span is ended while the exception is still logically "in
+ // flight". This may be actually "in flight" in some languages (e.g. if the
+ // exception is passed to a Context manager's `__exit__` method in Python)
+ // but will usually be caught at the point of recording the exception in
+ // most languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown whether it will escape the scope of a span. However, it is
+ // trivial to know that an exception will escape, if one checks for an
+ // active exception just before ending the span, as done in the [example
+ // above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
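+
+// A sketch of recording an escaping exception just before ending the span
+// (err and span are assumptions; RecordError and trace.WithAttributes are
+// from go.opentelemetry.io/otel/trace):
+//
+//    if err != nil {
+//        span.RecordError(err, trace.WithAttributes(ExceptionEscaped(true)))
+//    }
+//    span.End()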
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
new file mode 100644
index 0000000000..5184ee339a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
new file mode 100644
index 0000000000..f7aaa50b9e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
@@ -0,0 +1,2299 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
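+
+// A sketch of attaching these as resource attributes (assumes the package's
+// SchemaURL constant and go.opentelemetry.io/otel/sdk/resource are available
+// to the caller; values are illustrative):
+//
+//    res := resource.NewWithAttributes(SchemaURL,
+//        CloudProviderAWS,
+//        CloudPlatformAWSEC2,
+//        CloudRegion("us-west-2"),
+//        CloudAvailabilityZone("us-west-2a"),
+//    )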
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
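+// Illustrative sketch (not part of the generated conventions): the log
+// helpers are variadic and yield string-slice attributes, so several log
+// groups or streams can be recorded in a single attribute. Values are
+// hypothetical.
+func exampleLogAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
+  AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+ }
+}
+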
+// Resources used by Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
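+// Illustrative sketch (not part of the generated conventions): in a real
+// detector these values would be read from the CLOUD_RUN_EXECUTION and
+// CLOUD_RUN_TASK_INDEX environment variables (e.g. os.Getenv plus
+// strconv.Atoi); literals are used here to keep the file's imports
+// unchanged.
+func exampleCloudRunJobAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GCPCloudRunJobExecution("sample-job-mdw84"),
+  GCPCloudRunJobTaskIndex(0),
+ }
+}
+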
+// Resources used by Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+)
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// Heroku dyno metadata.
+const (
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+)
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a hash
+ // value.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // OCI defines a digest of manifest.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string representing the full
+ // command.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a hash
+// value.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
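+// Illustrative sketch (not part of the generated conventions): describing a
+// container with the helpers above. Per the note on "container.command",
+// embedded credentials should be scrubbed before recording command
+// attributes. All values are hypothetical.
+func exampleContainerAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ContainerName("opentelemetry-autoconf"),
+  ContainerID("a3bf90e006b2"),
+  ContainerImageName("gcr.io/opentelemetry/operator"),
+  ContainerImageTag("0.1"),
+  ContainerCommandArgs("otelcontribcol", "--config", "config.yaml"),
+ }
+}
+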
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
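+// Illustrative sketch (not part of the generated conventions): "device.id"
+// carries the privacy constraints documented above, so the UUID below stands
+// in for a value obtained from the platform APIs (vendor identifier or
+// Firebase Installation ID), never an advertising identifier.
+func exampleDeviceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
+  DeviceModelIdentifier("SM-G920F"),
+  DeviceModelName("Samsung Galaxy S6"),
+  DeviceManufacturer("Samsung"),
+ }
+}
+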
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, which will potentially be reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
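+// Illustrative sketch (not part of the generated conventions): an AWS Lambda
+// configured with 128 MiB reports faas.max_memory as
+// 128 * 1,048,576 = 134,217,728 bytes, matching the documented example. The
+// function name and version are hypothetical.
+func exampleFaaSAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FaaSName("my-function"),
+  FaaSVersion("26"),
+  FaaSMaxMemory(128 * 1048576),
+ }
+}
+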
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
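+// Illustrative sketch (not part of the generated conventions): "host.arch"
+// is an enum, so one of the predefined variables above is used rather than a
+// free-form string. The other values are hypothetical.
+func exampleHostAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HostID("fdbf79e8af94cb7f9e8df36789187052"),
+  HostName("opentelemetry-test"),
+  HostArchAMD64,
+ }
+}
+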
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S does not have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
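+// Illustrative sketch (not part of the generated conventions): per the note
+// above, the cluster pseudo-ID is the UID of the kube-system namespace; a
+// real detector would fetch it from the Kubernetes API rather than hardcode
+// it.
+func exampleK8SClusterAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  K8SClusterName("opentelemetry-cluster"),
+  K8SClusterUID("218fc5a9-a5f1-4b54-aa05-46717d0ab26d"),
+ }
+}
+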
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from Pod specification, which must be unique within a Pod. The
+ // container runtime usually uses a different globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, which must be unique within a Pod. The
+// container runtime usually uses a different globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
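+// Illustrative sketch (not part of the generated conventions): per the note
+// on "k8s.container.restart_count", pod name, container name, and restart
+// count together pin down one specific (running or stopped) container
+// instance.
+func exampleK8SContainerAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  K8SPodName("opentelemetry-pod-autoconf"),
+  K8SContainerName("redis"),
+  K8SContainerRestartCount(2),
+ }
+}
+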
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, as reported by, e.g., the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, e.g., the
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
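+// Illustrative sketch (not part of the generated conventions): "os.type" is
+// a required enum, so one of the predefined variables above is combined with
+// the free-form name and version attributes.
+func exampleOSAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  OSTypeLinux,
+  OSName("Ubuntu"),
+  OSVersion("18.04.1"),
+ }
+}
+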
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
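+// Illustrative sketch (not part of the generated conventions): per the note
+// on "process.command_line", the received argv is recorded via
+// ProcessCommandArgs rather than assembling a command-line string just for
+// monitoring. Values are hypothetical.
+func exampleProcessAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ProcessPID(1234),
+  ProcessExecutableName("otelcol"),
+  ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+  ProcessOwner("root"),
+ }
+}
+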
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
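+
+// Illustrative sketch (an assumption, not part of the generated file):
+// service identity is usually declared once on the SDK Resource:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"),
+//		semconv.ServiceVersion("2.0.0"),
+//	)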
+
+// A service instance.
+const (
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
new file mode 100644
index 0000000000..be07217d8a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.21.0"
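+
+// Illustrative sketch (an assumption, not part of the generated file):
+// SchemaURL is passed alongside the attributes so consumers know which
+// version of the conventions they follow, and merging with the SDK defaults
+// might look like:
+//
+//	res, err := resource.Merge(
+//		resource.Default(),
+//		resource.NewWithAttributes(semconv.SchemaURL,
+//			semconv.ServiceName("shoppingcart")),
+//	)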
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
new file mode 100644
index 0000000000..55698cc447
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
@@ -0,0 +1,2484 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
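+
+// Illustrative sketch (an assumption, not part of the generated file):
+// exception attributes are commonly attached to a span event named
+// "exception", with trace being go.opentelemetry.io/otel/trace:
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		semconv.ExceptionType("OSError"),
+//		semconv.ExceptionMessage("Division by zero"),
+//	))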
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` request, as applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, as applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // that contains a value describing the type of event related to the originating
+ // occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the originating
+// occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
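+
+// Illustrative sketch (an assumption, not part of the generated file): a span
+// for processing a CloudEvent might be annotated as:
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
+//	)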
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
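+
+// Illustrative sketch (an assumption, not part of the generated file): a
+// database client span combining the enum constants with the constructors
+// above:
+//
+//	span.SetAttributes(
+//		semconv.DBSystemPostgreSQL,
+//		semconv.DBName("customers"),
+//		semconv.DBOperation("SELECT"),
+//		semconv.DBStatement("SELECT * FROM wuser_table"),
+//	)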
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
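+
+// Illustrative sketch (an assumption, not part of the generated file): a
+// Cassandra query span might carry:
+//
+//	span.SetAttributes(
+//		semconv.DBCassandraConsistencyLevelLocalQuorum,
+//		semconv.DBCassandraTable("mykeyspace.mytable"),
+//		semconv.DBCassandraIdempotence(true),
+//	)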
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Call-level attributes for Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // Cosmos DB operation type.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (when performing one of the
+ // operations in this list)
+ // Stability: stable
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
+ // default))
+ // Stability: stable
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if available)
+ // Stability: stable
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (if response was received)
+ // Stability: stable
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (when response was received and
+ // contained sub-code.)
+ // Stability: stable
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
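+
+// Illustrative sketch (an assumption, not part of the generated file): a
+// non-OTLP exporter might translate a finished span's status into these
+// attributes, with codes being go.opentelemetry.io/otel/codes and span an
+// SDK ReadOnlySpan:
+//
+//	if s := span.Status(); s.Code == codes.Error {
+//		attrs = append(attrs, semconv.OTelStatusCodeError,
+//			semconv.OTelStatusDescription(s.Description))
+//	}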
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing servers (also known as serverless
+// functions or Function as a Service (FaaS)), as reported via spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that corresponding incoming would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
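+
+// Illustrative sketch (an assumption, not part of the generated file): a FaaS
+// handler span started with its trigger and invocation ID:
+//
+//	ctx, span := tracer.Start(ctx, "handle-request", trace.WithAttributes(
+//		semconv.FaaSTriggerHTTP,
+//		semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	))
+//	defer span.End()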
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 this corresponds to the bucket name,
+	// and in Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+// to the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
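+
+// A possible combination for a datasource-triggered invocation (values taken
+// from the Examples above; span is an assumed trace.Span):
+//
+//	span.SetAttributes(
+//		FaaSTriggerDatasource,
+//		FaaSDocumentCollection("myBucketName"),
+//		FaaSDocumentOperationInsert,
+//		FaaSDocumentTime("2020-01-23T13:47:06Z"),
+//		FaaSDocumentName("myFile.txt"),
+//	)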
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
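+
+// For a timer-triggered invocation, a plausible attribute set (illustrative
+// only) pairs the trigger enum with the schedule attributes:
+//
+//	span.SetAttributes(
+//		FaaSTriggerTimer,
+//		FaaSTime("2020-01-23T13:47:06Z"),
+//		FaaSCron("0/5 * * * ? *"),
+//	)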
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+	// uniquely identify the function and is also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
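+
+// On the outgoing (client) side, a sketch of annotating a call to another
+// function might look like this (all values illustrative):
+//
+//	span.SetAttributes(
+//		FaaSInvokedName("my-function"),
+//		FaaSInvokedProviderAWS,
+//		FaaSInvokedRegion("eu-central-1"),
+//	)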
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
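+
+// peer.service is commonly set when the client span is started; assuming
+// tracer is a trace.Tracer, one way (a sketch) is via trace.WithAttributes:
+//
+//	ctx, span := tracer.Start(ctx, "GetToken",
+//		trace.WithAttributes(PeerService("AuthTokenCache")))
+//	defer span.End()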
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+	// is making the request under, extracted from the token or application
+	// security context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+	// the client currently possesses, extracted from the token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under, extracted from the token or
+// application security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses, extracted from the token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
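+
+// A hedged end-to-end example using the enduser attributes (values are the
+// ones from the Examples above, not real credentials):
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message, write:files"),
+//	)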
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
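+
+// Note that Go has no "managed" thread ID; goroutines do not map onto these
+// attributes. A runtime that does have managed threads might record, purely
+// for illustration:
+//
+//	span.SetAttributes(ThreadID(42), ThreadName("main"))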
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
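+
+// In Go, the file and line attributes can be derived from runtime.Caller; a
+// sketch (note that runtime.FuncForPC(pc).Name() includes the package path,
+// so real instrumentation would split it into code.namespace and
+// code.function):
+//
+//	if pc, file, line, ok := runtime.Caller(0); ok {
+//		span.SetAttributes(
+//			CodeFunction(runtime.FuncForPC(pc).Name()),
+//			CodeFilepath(file),
+//			CodeLineNumber(line),
+//		)
+//	}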
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Server Unavailable,
+	// network issues, or any other reason).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
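+
+// Per the Note above, an instrumented client would set this once per retried
+// request; for example (illustrative), on the third attempt:
+//
+//	span.SetAttributes(HTTPResendCount(3))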
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
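+
+// A sketch of annotating a DynamoDB Query span with the shared attributes
+// above (parameter values are illustrative):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBProjection("Title, Price, Color"),
+//		AWSDynamoDBLimit(10),
+//	)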
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
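+
+// For a parallel Scan, the segment parameters and result counts above might
+// be recorded together (values mirror the Examples; purely illustrative):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(10),
+//		AWSDynamoDBTotalSegments(100),
+//		AWSDynamoDBCount(10),
+//		AWSDynamoDBScannedCount(50),
+//	)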
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+	// `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. those that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+	// operations, i.e. those that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+	// [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
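+
+// A sketch for an upload-part call, combining the S3 attributes above
+// (bucket, key, upload ID, and part number values are illustrative):
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		AWSS3PartNumber(3456),
+//	)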
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
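+
+// Annotating a GraphQL execution span might combine the operation type enum
+// with the name and document (a sketch; the document value comes from the
+// Examples above):
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)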
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client_id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If a client id is available)
+ // Stability: stable
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client_id")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
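+
+// A hedged example for a batch "process" operation in a Kafka consumer
+// (system, count, and client id values are illustrative):
+//
+//	span.SetAttributes(
+//		MessagingSystem("kafka"),
+//		MessagingOperationProcess,
+//		MessagingBatchMessageCount(2),
+//		MessagingClientID("client-5"),
+//	)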
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+	// being called; it must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
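+
+// RPC spans are conventionally named $package.$service/$method; assuming
+// tracer is a trace.Tracer, a client-side sketch could be:
+//
+//	ctx, span := tracer.Start(ctx, "myservice.EchoService/exampleMethod",
+//		trace.WithAttributes(
+//			RPCSystemGRPC,
+//			RPCService("myservice.EchoService"),
+//			RPCMethod("exampleMethod"),
+//		))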
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
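+
+// Illustrative usage (an editor's sketch, not part of the generated
+// specification): instrumentation usually records one of the values above
+// once the gRPC call completes. Here `span` is an active trace.Span and `st`
+// is assumed to be a *status.Status from google.golang.org/grpc/status.
+//
+//	span.SetAttributes(RPCGRPCStatusCodeKey.Int(int(st.Code())))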
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in the `jsonrpc` property of request/response. Since JSON-RPC
+	// 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of the request or response. Since the protocol allows the id to
+	// be an int, a string, `null`, or missing (for notifications), the value is
+	// expected to be cast to a string for simplicity. Use an empty string for a
+	// `null` value. Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in the `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of the request or response. Since the protocol allows the id to be
+// an int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string for a
+// `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
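+
+// Illustrative usage (an editor's sketch, not part of the generated
+// specification): for a failed JSON-RPC call the attributes above might be
+// combined as follows, using the example values from the comments; `span` is
+// assumed to be an active trace.Span.
+//
+//	span.SetAttributes(
+//		RPCJsonrpcVersion("2.0"),
+//		RPCJsonrpcRequestID("10"),
+//		RPCJsonrpcErrorCode(-32700),
+//		RPCJsonrpcErrorMessage("Parse error"),
+//	)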
+
+// Tech-specific attributes for Connect RPC.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If response is not successful
+ // and if error code available.)
+ // Stability: stable
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
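+
+// Illustrative usage (an editor's sketch, not part of the generated
+// specification): a Connect RPC instrumentation would typically select the
+// variable above matching the handler's error code; `span` is assumed to be
+// an active trace.Span and `err` the handler error.
+//
+//	if err != nil {
+//		span.SetAttributes(RPCConnectRPCErrorCodeUnavailable)
+//	}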
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md
new file mode 100644
index 0000000000..980fcc7df5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.25.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.25.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.25.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go
new file mode 100644
index 0000000000..30a51fa701
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go
@@ -0,0 +1,8038 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It identifies the class / type of event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.31.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Logs from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
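+
+// Illustrative usage (an editor's sketch, not part of the generated
+// specification): a file-tailing collector could describe the origin of a
+// log record with a small attribute set built from the helpers above; the
+// values are the example values from the comments.
+//
+//	attrs := []attribute.KeyValue{
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//	}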
+
+// Describes Database attributes
+const (
+ // PoolNameKey is the attribute Key conforming to the "pool.name" semantic
+ // conventions. It represents the name of the connection pool; unique
+ // within the instrumented application. In case the connection pool
+ // implementation doesn't provide a name, instrumentation should use a
+ // combination of `server.address` and `server.port` attributes formatted
+ // as `server.address:server.port`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ PoolNameKey = attribute.Key("pool.name")
+
+ // StateKey is the attribute Key conforming to the "state" semantic
+ // conventions. It represents the state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'idle'
+ StateKey = attribute.Key("state")
+)
+
+var (
+ // idle
+ StateIdle = StateKey.String("idle")
+ // used
+ StateUsed = StateKey.String("used")
+)
+
+// PoolName returns an attribute KeyValue conforming to the "pool.name"
+// semantic conventions. It represents the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation doesn't provide a name, instrumentation should use a
+// combination of `server.address` and `server.port` attributes formatted as
+// `server.address:server.port`.
+func PoolName(val string) attribute.KeyValue {
+ return PoolNameKey.String(val)
+}
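+
+// Illustrative usage (an editor's sketch, not part of the generated
+// specification): connection-pool usage metrics typically report one data
+// point per state. `usageCounter` is assumed to be a
+// metric.Int64UpDownCounter from go.opentelemetry.io/otel/metric, and `ctx`
+// is assumed to exist in the caller.
+//
+//	usageCounter.Add(ctx, 1, metric.WithAttributes(
+//		PoolName("myDataSource"),
+//		StateIdle,
+//	))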
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+	// the rate-limiting result; it shows whether the lease was acquired or
+	// contains a rejection reason.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: stable
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if the matched endpoint for the
+ // request had a rate-limiting policy.)
+ // Stability: stable
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating if request was handled by the application pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (if and only if the request was
+ // not handled.)
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If and only if a route was
+ // successfully matched.)
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating whether the request was handled by the application
+// pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+	// SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the [SignalR
+ // transport
+ // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'web_sockets', 'long_polling'
+ SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+ // The connection was closed normally
+ SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout
+ SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down
+ SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+ // ServerSentEvents protocol
+ SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+ // WebSockets protocol
+ SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// Describes JVM buffer metric attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// Attributes for process CPU metrics.
+const (
+ // ProcessCPUStateKey is the attribute Key conforming to the
+ // "process.cpu.state" semantic conventions. It represents the CPU state
+ // for this data point. A process SHOULD be characterized _either_ by data
+ // points with no `state` labels, _or only_ data points with `state`
+ // labels.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessCPUStateKey = attribute.Key("process.cpu.state")
+)
+
+var (
+ // system
+ ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
+ // user
+ ProcessCPUStateUser = ProcessCPUStateKey.String("user")
+ // wait
+ ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
+)
+
+// Describes System metric attributes
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '(identifier)'
+ SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU metric attributes
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // logical CPU number [0..n-1]
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemCPUStateKey is the attribute Key conforming to the
+ // "system.cpu.state" semantic conventions. It represents the CPU state for
+ // this data point. A system's CPU SHOULD be characterized *either* by data
+ // points with no `state` labels, *or only* data points with `state`
+ // labels.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle', 'interrupt'
+ SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+ // user
+ SystemCPUStateUser = SystemCPUStateKey.String("user")
+ // system
+ SystemCPUStateSystem = SystemCPUStateKey.String("system")
+ // nice
+ SystemCPUStateNice = SystemCPUStateKey.String("nice")
+ // idle
+ SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+ // iowait
+ SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+ // interrupt
+ SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+ // steal
+ SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
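+
+// Illustrative usage (an editor's sketch, not part of the generated
+// specification): per-CPU time metrics combine the logical CPU number with a
+// state label. `cpuTime` is assumed to be a metric.Float64Counter and
+// `seconds` a float64 measured elsewhere.
+//
+//	cpuTime.Add(ctx, seconds, metric.WithAttributes(
+//		SystemCPULogicalNumber(1),
+//		SystemCPUStateIdle,
+//	))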
+
+// Describes System Memory metric attributes
+const (
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory
+ // state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free', 'cached'
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+ // used
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // shared
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging metric attributes
+const (
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'in'
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory
+ // paging state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free'
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory
+ // paging type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'minor'
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+ // in
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+ // used
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+ // major
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem metric attributes
+const (
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the
+ // filesystem mode
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'rw, ro'
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/mnt/data'
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the
+ // filesystem state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'used'
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the
+ // filesystem type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ext4'
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+ // used
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+ // fat32
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network metric attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents a stateless
+ // protocol MUST NOT set this attribute
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process metric attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// Attributes for AWS DynamoDB.
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
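+
+// Illustrative usage (an editor's sketch, not part of the generated
+// specification): an AWS SDK instrumentation might annotate a DynamoDB Query
+// span with a subset of the attributes above, using the example values from
+// the comments; `span` is assumed to be an active trace.Span.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBIndexName("name_to_group"),
+//		AWSDynamoDBLimit(10),
+//		AWSDynamoDBConsistentRead(true),
+//	)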
+
+// The web browser attributes
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
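+
+// As a hypothetical usage sketch (serverSpan is assumed to be a server-side
+// trace.Span observing an inbound connection), the client attributes compose
+// the same way:
+//
+//	serverSpan.SetAttributes(
+//		ClientAddress("10.1.2.80"),
+//		ClientPort(65123),
+//	)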
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the zone
+ // where the resource is running. Cloud regions often have multiple,
+ // isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region in which
+ // the resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/