diff --git a/Gopkg.lock b/Gopkg.lock index cccf6b4f7..c0874e295 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -25,18 +25,6 @@ pruneopts = "UT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" -[[projects]] - name = "github.com/dnstap/golang-dnstap" - packages = ["."] - revision = "6b9665e675009e360f1c34b4526c2b215bc2ff68" - version = "tags/v0.1.0" - -[[projects]] - name = "github.com/farsightsec/golang-framestream" - packages = ["."] - revision = "fa4b164d59b87c744f49643b5d435effba516c4e" - version = "tags/v0.2.0" - [[projects]] digest = "1:705c40022f5c03bf96ffeb6477858d88565064485a513abcd0f11a0911546cb6" name = "github.com/blang/semver" @@ -57,17 +45,12 @@ "plugin/cache", "plugin/cache/freq", "plugin/debug", - "plugin/dnstap", - "plugin/dnstap/dnstapio", - "plugin/dnstap/msg", - "plugin/dnstap/taprw", "plugin/errors", "plugin/etcd/msg", "plugin/forward", "plugin/health", "plugin/log", "plugin/loop", - "plugin/metadata", "plugin/metrics", "plugin/metrics/vars", "plugin/pkg/cache", @@ -419,11 +402,10 @@ packages = [ "prometheus", "prometheus/promhttp", - "prometheus/internal" ] pruneopts = "UT" - revision = "505eaef017263e299324067d40ca2c48f6a2cf50" - version = "v0.9.2" + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" [[projects]] branch = "master" @@ -943,4 +925,4 @@ "k8s.io/kubernetes/pkg/version/prometheus", ] solver-name = "gps-cdcl" - solver-version = 1 \ No newline at end of file + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 76d714fa3..c8961f697 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -47,7 +47,7 @@ [[constraint]] name = "github.com/prometheus/client_golang" - version = "0.9.2" + version = "0.8.0" [[constraint]] branch = "master" @@ -85,18 +85,10 @@ name = "github.com/ugorji/go" revision = "9c7f9b7a2bc3a520f7c7b30b34b7f85f47fe27b6" -[[override]] - name = "github.com/dnstap/golang-dnstap" - revision = "6b9665e675009e360f1c34b4526c2b215bc2ff68" - -[[override]] - name = "github.com/farsightsec/golang-framestream" - revision = "fa4b164d59b87c744f49643b5d435effba516c4e" - [prune] go-tests = true unused-packages = true # Do not prune unused files in ginkgo. Those are needed to build ginkgo [[prune.project]] name = "github.com/onsi/ginkgo" - unused-packages = false \ No newline at end of file + unused-packages = false diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/OWNERS b/vendor/github.com/coredns/coredns/plugin/dnstap/OWNERS deleted file mode 100644 index 6f6724297..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - varyoo - - yongtang -approvers: - - varyoo - - yongtang diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/README.md b/vendor/github.com/coredns/coredns/plugin/dnstap/README.md deleted file mode 100644 index 81152e742..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# dnstap - -## Name - -*dnstap* - enable logging to dnstap. - -## Description - -dnstap is a flexible, structured binary log format for DNS software: http://dnstap.info. With this -plugin you make CoreDNS output dnstap logging. - -Note that there is an internal buffer, so expect at least 13 requests before the server sends its -dnstap messages to the socket. - -## Syntax - -~~~ txt -dnstap SOCKET [full] -~~~ - -* **SOCKET** is the socket path supplied to the dnstap command line tool. -* `full` to include the wire-format DNS message. 
- -## Examples - -Log information about client requests and responses to */tmp/dnstap.sock*. - -~~~ txt -dnstap /tmp/dnstap.sock -~~~ - -Log information including the wire-format DNS message about client requests and responses to */tmp/dnstap.sock*. - -~~~ txt -dnstap unix:///tmp/dnstap.sock full -~~~ - -Log to a remote endpoint. - -~~~ txt -dnstap tcp://127.0.0.1:6000 full -~~~ - -## Command Line Tool - -Dnstap has a command line tool that can be used to inspect the logging. The tool can be found -at Github: . It's written in Go. - -The following command listens on the given socket and decodes messages to stdout. - -~~~ sh -$ dnstap -u /tmp/dnstap.sock -~~~ - -The following command listens on the given socket and saves message payloads to a binary dnstap-format log file. - -~~~ sh -$ dnstap -u /tmp/dnstap.sock -w /tmp/test.dnstap -~~~ - -Listen for dnstap messages on port 6000. - -~~~ sh -$ dnstap -l 127.0.0.1:6000 -~~~ - -## Using Dnstap in your plugin - -~~~ Go -import ( - "github.com/coredns/coredns/plugin/dnstap" - "github.com/coredns/coredns/plugin/dnstap/msg" -) - -func (h Dnstap) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { - // log client query to Dnstap - if t := dnstap.TapperFromContext(ctx); t != nil { - b := msg.New().Time(time.Now()).Addr(w.RemoteAddr()) - if t.Pack() { - b.Msg(r) - } - if m, err := b.ToClientQuery(); err == nil { - t.TapMessage(m) - } - } - - // ... -} -~~~ - -## See Also - -[dnstap.info](http://dnstap.info). diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/dnstapio/dnstap_encoder.go b/vendor/github.com/coredns/coredns/plugin/dnstap/dnstapio/dnstap_encoder.go deleted file mode 100644 index 07dfc8413..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/dnstapio/dnstap_encoder.go +++ /dev/null @@ -1,92 +0,0 @@ -package dnstapio - -import ( - "encoding/binary" - "fmt" - "io" - - tap "github.com/dnstap/golang-dnstap" - fs "github.com/farsightsec/golang-framestream" - "github.com/golang/protobuf/proto" -) - -const ( - frameLenSize = 4 - protobufSize = 1024 * 1024 -) - -type dnstapEncoder struct { - fse *fs.Encoder - opts *fs.EncoderOptions - writer io.Writer - buffer *proto.Buffer -} - -func newDnstapEncoder(o *fs.EncoderOptions) *dnstapEncoder { - return &dnstapEncoder{ - opts: o, - buffer: proto.NewBuffer(make([]byte, 0, protobufSize)), - } -} - -func (enc *dnstapEncoder) resetWriter(w io.Writer) error { - fse, err := fs.NewEncoder(w, enc.opts) - if err != nil { - return err - } - if err = fse.Flush(); err != nil { - return err - } - enc.fse = fse - enc.writer = w - return nil -} - -func (enc *dnstapEncoder) writeMsg(msg *tap.Dnstap) error { - if len(enc.buffer.Bytes()) >= protobufSize { - if err := enc.flushBuffer(); err != nil { - return err - } - } - bufLen := len(enc.buffer.Bytes()) - // add placeholder for frame length - if err := enc.buffer.EncodeFixed32(0); err != nil { - enc.buffer.SetBuf(enc.buffer.Bytes()[:bufLen]) - return err - } - if err := enc.buffer.Marshal(msg); err != nil { - enc.buffer.SetBuf(enc.buffer.Bytes()[:bufLen]) - return err - } - enc.encodeFrameLen(enc.buffer.Bytes()[bufLen:]) - return nil -} - -func (enc *dnstapEncoder) flushBuffer() error { - if enc.fse == nil || enc.writer == nil { - return fmt.Errorf("no writer") - } - - buf := enc.buffer.Bytes() - written := 0 - for written < len(buf) { - n, err := enc.writer.Write(buf[written:]) - written += n - if err != nil { - return err - } - } - enc.buffer.Reset() - return nil -} - -func (enc *dnstapEncoder) encodeFrameLen(buf 
[]byte) { - binary.BigEndian.PutUint32(buf, uint32(len(buf)-4)) -} - -func (enc *dnstapEncoder) close() error { - if enc.fse != nil { - return enc.fse.Close() - } - return nil -} diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/dnstapio/io.go b/vendor/github.com/coredns/coredns/plugin/dnstap/dnstapio/io.go deleted file mode 100644 index 65e2e222e..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/dnstapio/io.go +++ /dev/null @@ -1,146 +0,0 @@ -package dnstapio - -import ( - "net" - "sync/atomic" - "time" - - clog "github.com/coredns/coredns/plugin/pkg/log" - - tap "github.com/dnstap/golang-dnstap" - fs "github.com/farsightsec/golang-framestream" -) - -var log = clog.NewWithPlugin("dnstap") - -const ( - tcpWriteBufSize = 1024 * 1024 - tcpTimeout = 4 * time.Second - flushTimeout = 1 * time.Second - queueSize = 10000 -) - -type dnstapIO struct { - endpoint string - socket bool - conn net.Conn - enc *dnstapEncoder - queue chan tap.Dnstap - dropped uint32 - quit chan struct{} -} - -// New returns a new and initialized DnstapIO. -func New(endpoint string, socket bool) DnstapIO { - return &dnstapIO{ - endpoint: endpoint, - socket: socket, - enc: newDnstapEncoder(&fs.EncoderOptions{ - ContentType: []byte("protobuf:dnstap.Dnstap"), - Bidirectional: true, - }), - queue: make(chan tap.Dnstap, queueSize), - quit: make(chan struct{}), - } -} - -// DnstapIO interface -type DnstapIO interface { - Connect() - Dnstap(payload tap.Dnstap) - Close() -} - -func (dio *dnstapIO) newConnect() error { - var err error - if dio.socket { - if dio.conn, err = net.Dial("unix", dio.endpoint); err != nil { - return err - } - } else { - if dio.conn, err = net.DialTimeout("tcp", dio.endpoint, tcpTimeout); err != nil { - return err - } - if tcpConn, ok := dio.conn.(*net.TCPConn); ok { - tcpConn.SetWriteBuffer(tcpWriteBufSize) - tcpConn.SetNoDelay(false) - } - } - - return dio.enc.resetWriter(dio.conn) -} - -// Connect connects to the dnstop endpoint. -func (dio *dnstapIO) Connect() { - if err := dio.newConnect(); err != nil { - log.Error("No connection to dnstap endpoint") - } - go dio.serve() -} - -// Dnstap enqueues the payload for log. -func (dio *dnstapIO) Dnstap(payload tap.Dnstap) { - select { - case dio.queue <- payload: - default: - atomic.AddUint32(&dio.dropped, 1) - } -} - -func (dio *dnstapIO) closeConnection() { - dio.enc.close() - if dio.conn != nil { - dio.conn.Close() - dio.conn = nil - } -} - -// Close waits until the I/O routine is finished to return. 
-func (dio *dnstapIO) Close() { - close(dio.quit) -} - -func (dio *dnstapIO) flushBuffer() { - if dio.conn == nil { - if err := dio.newConnect(); err != nil { - return - } - log.Info("Reconnected to dnstap") - } - - if err := dio.enc.flushBuffer(); err != nil { - log.Warningf("Connection lost: %s", err) - dio.closeConnection() - if err := dio.newConnect(); err != nil { - log.Errorf("Cannot connect to dnstap: %s", err) - } else { - log.Info("Reconnected to dnstap") - } - } -} - -func (dio *dnstapIO) write(payload *tap.Dnstap) { - if err := dio.enc.writeMsg(payload); err != nil { - atomic.AddUint32(&dio.dropped, 1) - } -} - -func (dio *dnstapIO) serve() { - timeout := time.After(flushTimeout) - for { - select { - case <-dio.quit: - dio.flushBuffer() - dio.closeConnection() - return - case payload := <-dio.queue: - dio.write(&payload) - case <-timeout: - if dropped := atomic.SwapUint32(&dio.dropped, 0); dropped > 0 { - log.Warningf("Dropped dnstap messages: %d", dropped) - } - dio.flushBuffer() - timeout = time.After(flushTimeout) - } - } -} diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/gocontext.go b/vendor/github.com/coredns/coredns/plugin/dnstap/gocontext.go deleted file mode 100644 index a8cc2c2b4..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/gocontext.go +++ /dev/null @@ -1,23 +0,0 @@ -package dnstap - -import "context" - -type contextKey struct{} - -var dnstapKey = contextKey{} - -// ContextWithTapper returns a new `context.Context` that holds a reference to -// `t`'s Tapper. -func ContextWithTapper(ctx context.Context, t Tapper) context.Context { - return context.WithValue(ctx, dnstapKey, t) -} - -// TapperFromContext returns the `Tapper` previously associated with `ctx`, or -// `nil` if no such `Tapper` could be found. -func TapperFromContext(ctx context.Context) Tapper { - val := ctx.Value(dnstapKey) - if sp, ok := val.(Tapper); ok { - return sp - } - return nil -} diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/handler.go b/vendor/github.com/coredns/coredns/plugin/dnstap/handler.go deleted file mode 100644 index 1178dad79..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/handler.go +++ /dev/null @@ -1,92 +0,0 @@ -package dnstap - -import ( - "context" - "time" - - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/plugin/dnstap/taprw" - - tap "github.com/dnstap/golang-dnstap" - "github.com/miekg/dns" -) - -// Dnstap is the dnstap handler. -type Dnstap struct { - Next plugin.Handler - IO IORoutine - - // Set to true to include the relevant raw DNS message into the dnstap messages. - JoinRawMessage bool -} - -type ( - // IORoutine is the dnstap I/O thread as defined by: . - IORoutine interface { - Dnstap(tap.Dnstap) - } - // Tapper is implemented by the Context passed by the dnstap handler. - Tapper interface { - TapMessage(message *tap.Message) - Pack() bool - } - tapContext struct { - context.Context - Dnstap - } -) - -// ContextKey defines the type of key that is used to save data into the context -type ContextKey string - -const ( - // DnstapSendOption specifies the Dnstap message to be send. Default is sent all. - DnstapSendOption ContextKey = "dnstap-send-option" -) - -// TapMessage implements Tapper. -func (h Dnstap) TapMessage(m *tap.Message) { - t := tap.Dnstap_MESSAGE - h.IO.Dnstap(tap.Dnstap{ - Type: &t, - Message: m, - }) -} - -// Pack returns true if the raw DNS message should be included into the dnstap messages. 
-func (h Dnstap) Pack() bool { - return h.JoinRawMessage -} - -// ServeDNS logs the client query and response to dnstap and passes the dnstap Context. -func (h Dnstap) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { - - // Add send option into context so other plugin can decide on which DNSTap - // message to be sent out - sendOption := taprw.SendOption{Cq: true, Cr: true} - newCtx := context.WithValue(ctx, DnstapSendOption, &sendOption) - newCtx = ContextWithTapper(newCtx, h) - - rw := &taprw.ResponseWriter{ - ResponseWriter: w, - Tapper: &h, - Query: r, - Send: &sendOption, - QueryEpoch: time.Now(), - } - - code, err := plugin.NextOrFailure(h.Name(), h.Next, newCtx, rw, r) - if err != nil { - // ignore dnstap errors - return code, err - } - - if err = rw.DnstapError(); err != nil { - return code, plugin.Error("dnstap", err) - } - - return code, nil -} - -// Name returns dnstap. -func (h Dnstap) Name() string { return "dnstap" } diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/msg/msg.go b/vendor/github.com/coredns/coredns/plugin/dnstap/msg/msg.go deleted file mode 100644 index d96fc6c9a..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/msg/msg.go +++ /dev/null @@ -1,159 +0,0 @@ -package msg - -import ( - "errors" - "net" - "strconv" - "time" - - tap "github.com/dnstap/golang-dnstap" - "github.com/miekg/dns" -) - -// Builder helps to build a Dnstap message. -type Builder struct { - Packed []byte - SocketProto tap.SocketProtocol - SocketFam tap.SocketFamily - Address net.IP - Port uint32 - TimeSec uint64 - TimeNsec uint32 - - err error -} - -// New returns a new Builder -func New() *Builder { - return &Builder{} -} - -// Addr adds the remote address to the message. -func (b *Builder) Addr(remote net.Addr) *Builder { - if b.err != nil { - return b - } - - switch addr := remote.(type) { - case *net.TCPAddr: - b.Address = addr.IP - b.Port = uint32(addr.Port) - b.SocketProto = tap.SocketProtocol_TCP - case *net.UDPAddr: - b.Address = addr.IP - b.Port = uint32(addr.Port) - b.SocketProto = tap.SocketProtocol_UDP - default: - b.err = errors.New("unknown remote address type") - return b - } - - if b.Address.To4() != nil { - b.SocketFam = tap.SocketFamily_INET - } else { - b.SocketFam = tap.SocketFamily_INET6 - } - return b -} - -// Msg adds the raw DNS message to the dnstap message. -func (b *Builder) Msg(m *dns.Msg) *Builder { - if b.err != nil { - return b - } - - b.Packed, b.err = m.Pack() - return b -} - -// HostPort adds the remote address as encoded by dnsutil.ParseHostPortOrFile to the message. -func (b *Builder) HostPort(addr string) *Builder { - ip, port, err := net.SplitHostPort(addr) - if err != nil { - b.err = err - return b - } - p, err := strconv.ParseUint(port, 10, 32) - if err != nil { - b.err = err - return b - } - b.Port = uint32(p) - - if ip := net.ParseIP(ip); ip != nil { - b.Address = []byte(ip) - if ip := ip.To4(); ip != nil { - b.SocketFam = tap.SocketFamily_INET - } else { - b.SocketFam = tap.SocketFamily_INET6 - } - return b - } - b.err = errors.New("not an ip address") - return b -} - -// Time adds the timestamp to the message. -func (b *Builder) Time(ts time.Time) *Builder { - b.TimeSec = uint64(ts.Unix()) - b.TimeNsec = uint32(ts.Nanosecond()) - return b -} - -// ToClientResponse transforms Data into a client response message. 
-func (b *Builder) ToClientResponse() (*tap.Message, error) { - t := tap.Message_CLIENT_RESPONSE - return &tap.Message{ - Type: &t, - SocketFamily: &b.SocketFam, - SocketProtocol: &b.SocketProto, - ResponseTimeSec: &b.TimeSec, - ResponseTimeNsec: &b.TimeNsec, - ResponseMessage: b.Packed, - QueryAddress: b.Address, - QueryPort: &b.Port, - }, b.err -} - -// ToClientQuery transforms Data into a client query message. -func (b *Builder) ToClientQuery() (*tap.Message, error) { - t := tap.Message_CLIENT_QUERY - return &tap.Message{ - Type: &t, - SocketFamily: &b.SocketFam, - SocketProtocol: &b.SocketProto, - QueryTimeSec: &b.TimeSec, - QueryTimeNsec: &b.TimeNsec, - QueryMessage: b.Packed, - QueryAddress: b.Address, - QueryPort: &b.Port, - }, b.err -} - -// ToOutsideQuery transforms the data into a forwarder or resolver query message. -func (b *Builder) ToOutsideQuery(t tap.Message_Type) (*tap.Message, error) { - return &tap.Message{ - Type: &t, - SocketFamily: &b.SocketFam, - SocketProtocol: &b.SocketProto, - QueryTimeSec: &b.TimeSec, - QueryTimeNsec: &b.TimeNsec, - QueryMessage: b.Packed, - ResponseAddress: b.Address, - ResponsePort: &b.Port, - }, b.err -} - -// ToOutsideResponse transforms the data into a forwarder or resolver response message. -func (b *Builder) ToOutsideResponse(t tap.Message_Type) (*tap.Message, error) { - return &tap.Message{ - Type: &t, - SocketFamily: &b.SocketFam, - SocketProtocol: &b.SocketProto, - ResponseTimeSec: &b.TimeSec, - ResponseTimeNsec: &b.TimeNsec, - ResponseMessage: b.Packed, - ResponseAddress: b.Address, - ResponsePort: &b.Port, - }, b.err -} diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/setup.go b/vendor/github.com/coredns/coredns/plugin/dnstap/setup.go deleted file mode 100644 index 70896722a..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/setup.go +++ /dev/null @@ -1,96 +0,0 @@ -package dnstap - -import ( - "strings" - - "github.com/coredns/coredns/core/dnsserver" - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/plugin/dnstap/dnstapio" - clog "github.com/coredns/coredns/plugin/pkg/log" - "github.com/coredns/coredns/plugin/pkg/parse" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyfile" -) - -var log = clog.NewWithPlugin("dnstap") - -func init() { - caddy.RegisterPlugin("dnstap", caddy.Plugin{ - ServerType: "dns", - Action: wrapSetup, - }) -} - -func wrapSetup(c *caddy.Controller) error { - if err := setup(c); err != nil { - return plugin.Error("dnstap", err) - } - return nil -} - -type config struct { - target string - socket bool - full bool -} - -func parseConfig(d *caddyfile.Dispenser) (c config, err error) { - d.Next() // directive name - - if !d.Args(&c.target) { - return c, d.ArgErr() - } - - if strings.HasPrefix(c.target, "tcp://") { - // remote IP endpoint - servers, err := parse.HostPortOrFile(c.target[6:]) - if err != nil { - return c, d.ArgErr() - } - c.target = servers[0] - } else { - // default to UNIX socket - if strings.HasPrefix(c.target, "unix://") { - c.target = c.target[7:] - } - c.socket = true - } - - c.full = d.NextArg() && d.Val() == "full" - - return -} - -func setup(c *caddy.Controller) error { - conf, err := parseConfig(&c.Dispenser) - if err != nil { - return err - } - - dio := dnstapio.New(conf.target, conf.socket) - dnstap := Dnstap{IO: dio, JoinRawMessage: conf.full} - - c.OnStartup(func() error { - dio.Connect() - return nil - }) - - c.OnRestart(func() error { - dio.Close() - return nil - }) - - c.OnFinalShutdown(func() error { - dio.Close() - return nil - }) - 
- dnsserver.GetConfig(c).AddPlugin( - func(next plugin.Handler) plugin.Handler { - dnstap.Next = next - return dnstap - }) - - return nil -} diff --git a/vendor/github.com/coredns/coredns/plugin/dnstap/taprw/writer.go b/vendor/github.com/coredns/coredns/plugin/dnstap/taprw/writer.go deleted file mode 100644 index 06e6c941d..000000000 --- a/vendor/github.com/coredns/coredns/plugin/dnstap/taprw/writer.go +++ /dev/null @@ -1,79 +0,0 @@ -// Package taprw takes a query and intercepts the response. -// It will log both after the response is written. -package taprw - -import ( - "fmt" - "time" - - "github.com/coredns/coredns/plugin/dnstap/msg" - - tap "github.com/dnstap/golang-dnstap" - "github.com/miekg/dns" -) - -// SendOption stores the flag to indicate whether a certain DNSTap message to -// be sent out or not. -type SendOption struct { - Cq bool - Cr bool -} - -// Tapper is what ResponseWriter needs to log to dnstap. -type Tapper interface { - TapMessage(*tap.Message) - Pack() bool -} - -// ResponseWriter captures the client response and logs the query to dnstap. -// Single request use. -// SendOption configures Dnstap to selectively send Dnstap messages. Default is send all. -type ResponseWriter struct { - QueryEpoch time.Time - Query *dns.Msg - dns.ResponseWriter - Tapper - Send *SendOption - - dnstapErr error -} - -// DnstapError check if a dnstap error occurred during Write and returns it. -func (w *ResponseWriter) DnstapError() error { - return w.dnstapErr -} - -// WriteMsg writes back the response to the client and THEN works on logging the request -// and response to dnstap. -func (w *ResponseWriter) WriteMsg(resp *dns.Msg) (writeErr error) { - writeErr = w.ResponseWriter.WriteMsg(resp) - writeEpoch := time.Now() - - b := msg.New().Time(w.QueryEpoch).Addr(w.RemoteAddr()) - - if w.Send == nil || w.Send.Cq { - if w.Pack() { - b.Msg(w.Query) - } - if m, err := b.ToClientQuery(); err != nil { - w.dnstapErr = fmt.Errorf("client query: %s", err) - } else { - w.TapMessage(m) - } - } - - if w.Send == nil || w.Send.Cr { - if writeErr == nil { - if w.Pack() { - b.Msg(resp) - } - if m, err := b.Time(writeEpoch).ToClientResponse(); err != nil { - w.dnstapErr = fmt.Errorf("client response: %s", err) - } else { - w.TapMessage(m) - } - } - } - - return writeErr -} diff --git a/vendor/github.com/coredns/coredns/plugin/metadata/OWNERS b/vendor/github.com/coredns/coredns/plugin/metadata/OWNERS deleted file mode 100644 index 9355c9e98..000000000 --- a/vendor/github.com/coredns/coredns/plugin/metadata/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -reviewers: - - ekleiner - - fturib - - miekg -approvers: - - ekleiner - - fturib - - miekg diff --git a/vendor/github.com/coredns/coredns/plugin/metadata/README.md b/vendor/github.com/coredns/coredns/plugin/metadata/README.md deleted file mode 100644 index c69650927..000000000 --- a/vendor/github.com/coredns/coredns/plugin/metadata/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# metadata - -## Name - -*metadata* - enable a meta data collector. - -## Description - -By enabling *metadata* any plugin that implements [metadata.Provider -interface](https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider) will be called for -each DNS query, at beginning of the process for that query, in order to add it's own meta data to -context. - -The meta data collected will be available for all plugins, via the Context parameter provided in the -ServeDNS function. 
The package (code) documentation has examples on how to inspect and retrieve -metadata a plugin might be interested in. - -The meta data is added by setting a label with a value in the context. These labels should be named -`plugin/NAME`, where **NAME** is something descriptive. The only hard requirement the *metadata* -plugin enforces is that the labels contains a slash. See the documentation for -`metadata.SetValueFunc`. - -The value stored is a string. The empty string signals "no meta data". See the documentation for -`metadata.ValueFunc` on how to retrieve this. - -## Syntax - -~~~ -metadata [ZONES... ] -~~~ - -* **ZONES** zones metadata should be invoked for. - -## Plugins - -`metadata.Provider` interface needs to be implemented by each plugin willing to provide metadata -information for other plugins. It will be called by metadata and gather the information from all -plugins in context. - -Note: this method should work quickly, because it is called for every request. - -## Examples - -The *rewrite* plugin uses meta data to rewrite requests. - -## Also See - -The [Provider interface](https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider) and -the [package level](https://godoc.org/github.com/coredns/coredns/plugin/metadata) documentation. diff --git a/vendor/github.com/coredns/coredns/plugin/metadata/metadata.go b/vendor/github.com/coredns/coredns/plugin/metadata/metadata.go deleted file mode 100644 index 4abe57ddf..000000000 --- a/vendor/github.com/coredns/coredns/plugin/metadata/metadata.go +++ /dev/null @@ -1,39 +0,0 @@ -package metadata - -import ( - "context" - - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/request" - - "github.com/miekg/dns" -) - -// Metadata implements collecting metadata information from all plugins that -// implement the Provider interface. -type Metadata struct { - Zones []string - Providers []Provider - Next plugin.Handler -} - -// Name implements the Handler interface. -func (m *Metadata) Name() string { return "metadata" } - -// ServeDNS implements the plugin.Handler interface. -func (m *Metadata) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { - - ctx = context.WithValue(ctx, key{}, md{}) - - state := request.Request{W: w, Req: r} - if plugin.Zones(m.Zones).Matches(state.Name()) != "" { - // Go through all Providers and collect metadata. - for _, p := range m.Providers { - ctx = p.Metadata(ctx, state) - } - } - - rcode, err := plugin.NextOrFailure(m.Name(), m.Next, ctx, w, r) - - return rcode, err -} diff --git a/vendor/github.com/coredns/coredns/plugin/metadata/provider.go b/vendor/github.com/coredns/coredns/plugin/metadata/provider.go deleted file mode 100644 index b22064200..000000000 --- a/vendor/github.com/coredns/coredns/plugin/metadata/provider.go +++ /dev/null @@ -1,122 +0,0 @@ -// Package metadata provides an API that allows plugins to add metadata to the context. -// Each metadata is stored under a label that has the form /. Each metadata -// is returned as a Func. When Func is called the metadata is returned. If Func is expensive to -// execute it is its responsibility to provide some form of caching. During the handling of a -// query it is expected the metadata stays constant. 
-// -// Basic example: -// -// Implement the Provider interface for a plugin p: -// -// func (p P) Metadata(ctx context.Context, state request.Request) context.Context { -// metadata.SetValueFunc(ctx, "test/something", func() string { return "myvalue" }) -// return ctx -// } -// -// Basic example with caching: -// -// func (p P) Metadata(ctx context.Context, state request.Request) context.Context { -// cached := "" -// f := func() string { -// if cached != "" { -// return cached -// } -// cached = expensiveFunc() -// return cached -// } -// metadata.SetValueFunc(ctx, "test/something", f) -// return ctx -// } -// -// If you need access to this metadata from another plugin: -// -// // ... -// valueFunc := metadata.ValueFunc(ctx, "test/something") -// value := valueFunc() -// // use 'value' -// -package metadata - -import ( - "context" - "strings" - - "github.com/coredns/coredns/request" -) - -// Provider interface needs to be implemented by each plugin willing to provide -// metadata information for other plugins. -type Provider interface { - // Metadata adds metadata to the context and returns a (potentially) new context. - // Note: this method should work quickly, because it is called for every request - // from the metadata plugin. - Metadata(ctx context.Context, state request.Request) context.Context -} - -// Func is the type of function in the metadata, when called they return the value of the label. -type Func func() string - -// IsLabel checks that the provided name is a valid label name, i.e. two words separated by a slash. -func IsLabel(label string) bool { - p := strings.Index(label, "/") - if p <= 0 || p >= len(label)-1 { - // cannot accept namespace empty nor label empty - return false - } - if strings.LastIndex(label, "/") != p { - // several slash in the Label - return false - } - return true - -} - -// Labels returns all metadata keys stored in the context. These label names should be named -// as: plugin/NAME, where NAME is something descriptive. -func Labels(ctx context.Context) []string { - if metadata := ctx.Value(key{}); metadata != nil { - if m, ok := metadata.(md); ok { - return keys(m) - } - } - return nil -} - -// ValueFunc returns the value function of label. If none can be found nil is returned. Calling the -// function returns the value of the label. -func ValueFunc(ctx context.Context, label string) Func { - if metadata := ctx.Value(key{}); metadata != nil { - if m, ok := metadata.(md); ok { - return m[label] - } - } - return nil -} - -// SetValueFunc set the metadata label to the value function. If no metadata can be found this is a noop and -// false is returned. Any existing value is overwritten. -func SetValueFunc(ctx context.Context, label string, f Func) bool { - if metadata := ctx.Value(key{}); metadata != nil { - if m, ok := metadata.(md); ok { - m[label] = f - return true - } - } - return false -} - -// md is metadata information storage. -type md map[string]Func - -// key defines the type of key that is used to save metadata into the context. 
-type key struct{} - -func keys(m map[string]Func) []string { - s := make([]string, len(m)) - i := 0 - for k := range m { - s[i] = k - i++ - } - return s -} diff --git a/vendor/github.com/coredns/coredns/plugin/metadata/setup.go b/vendor/github.com/coredns/coredns/plugin/metadata/setup.go deleted file mode 100644 index 282bcf7d9..000000000 --- a/vendor/github.com/coredns/coredns/plugin/metadata/setup.go +++ /dev/null @@ -1,61 +0,0 @@ -package metadata - -import ( - "github.com/coredns/coredns/core/dnsserver" - "github.com/coredns/coredns/plugin" - - "github.com/mholt/caddy" -) - -func init() { - caddy.RegisterPlugin("metadata", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} - -func setup(c *caddy.Controller) error { - m, err := metadataParse(c) - if err != nil { - return err - } - dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { - m.Next = next - return m - }) - - c.OnStartup(func() error { - plugins := dnsserver.GetConfig(c).Handlers() - for _, p := range plugins { - if met, ok := p.(Provider); ok { - m.Providers = append(m.Providers, met) - } - } - return nil - }) - - return nil -} - -func metadataParse(c *caddy.Controller) (*Metadata, error) { - m := &Metadata{} - c.Next() - zones := c.RemainingArgs() - - if len(zones) != 0 { - m.Zones = zones - for i := 0; i < len(m.Zones); i++ { - m.Zones[i] = plugin.Host(m.Zones[i]).Normalize() - } - } else { - m.Zones = make([]string, len(c.ServerBlockKeys)) - for i := 0; i < len(c.ServerBlockKeys); i++ { - m.Zones[i] = plugin.Host(c.ServerBlockKeys[i]).Normalize() - } - } - - if c.NextBlock() || c.Next() { - return nil, plugin.Error("metadata", c.ArgErr()) - } - return m, nil -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c83641619..bc52e96f2 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589a..792994785 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) +type flag uintptr + var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. 
- offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. 
+ ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d5..205c28d68 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. 
The "disableunsafe" // tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 7c519ff47..1be8ce945 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a7..f78d89fc1 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875bac..b04edb7d7 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/dnstap/golang-dnstap/.gitignore b/vendor/github.com/dnstap/golang-dnstap/.gitignore deleted file mode 100644 index 1377554eb..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.swp diff --git a/vendor/github.com/dnstap/golang-dnstap/COPYRIGHT b/vendor/github.com/dnstap/golang-dnstap/COPYRIGHT deleted file mode 100644 index 169e84827..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/COPYRIGHT +++ /dev/null @@ -1,14 +0,0 @@ -Copyright (c) 2013-2014 by Farsight Security, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - diff --git a/vendor/github.com/dnstap/golang-dnstap/FrameStreamInput.go b/vendor/github.com/dnstap/golang-dnstap/FrameStreamInput.go deleted file mode 100644 index 5d58aa5b6..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/FrameStreamInput.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2013-2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dnstap - -import ( - "io" - "log" - "os" - - "github.com/farsightsec/golang-framestream" -) - -type FrameStreamInput struct { - wait chan bool - decoder *framestream.Decoder -} - -func NewFrameStreamInput(r io.ReadWriter, bi bool) (input *FrameStreamInput, err error) { - input = new(FrameStreamInput) - decoderOptions := framestream.DecoderOptions{ - ContentType: FSContentType, - Bidirectional: bi, - } - input.decoder, err = framestream.NewDecoder(r, &decoderOptions) - if err != nil { - return - } - input.wait = make(chan bool) - return -} - -func NewFrameStreamInputFromFilename(fname string) (input *FrameStreamInput, err error) { - file, err := os.Open(fname) - if err != nil { - return nil, err - } - input, err = NewFrameStreamInput(file, false) - return -} - -func (input *FrameStreamInput) ReadInto(output chan []byte) { - for { - buf, err := input.decoder.Decode() - if err != nil { - if err != io.EOF { - log.Printf("framestream.Decoder.Decode() failed: %s\n", err) - } - break - } - newbuf := make([]byte, len(buf)) - copy(newbuf, buf) - output <- newbuf - } - close(input.wait) -} - -func (input *FrameStreamInput) Wait() { - <-input.wait -} diff --git a/vendor/github.com/dnstap/golang-dnstap/FrameStreamOutput.go b/vendor/github.com/dnstap/golang-dnstap/FrameStreamOutput.go deleted file mode 100644 index d00bba892..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/FrameStreamOutput.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package dnstap - -import ( - "io" - "log" - "os" - - "github.com/farsightsec/golang-framestream" -) - -type FrameStreamOutput struct { - outputChannel chan []byte - wait chan bool - enc *framestream.Encoder -} - -func NewFrameStreamOutput(w io.Writer) (o *FrameStreamOutput, err error) { - o = new(FrameStreamOutput) - o.outputChannel = make(chan []byte, outputChannelSize) - o.enc, err = framestream.NewEncoder(w, &framestream.EncoderOptions{ContentType: FSContentType}) - if err != nil { - return - } - o.wait = make(chan bool) - return -} - -func NewFrameStreamOutputFromFilename(fname string) (o *FrameStreamOutput, err error) { - if fname == "" || fname == "-" { - return NewFrameStreamOutput(os.Stdout) - } - w, err := os.Create(fname) - if err != nil { - return - } - return NewFrameStreamOutput(w) -} - -func (o *FrameStreamOutput) GetOutputChannel() chan []byte { - return o.outputChannel -} - -func (o *FrameStreamOutput) RunOutputLoop() { - for frame := range o.outputChannel { - if _, err := o.enc.Write(frame); err != nil { - log.Fatalf("framestream.Encoder.Write() failed: %s\n", err) - break - } - } - close(o.wait) -} - -func (o *FrameStreamOutput) Close() { - close(o.outputChannel) - <-o.wait - o.enc.Flush() - o.enc.Close() -} diff --git a/vendor/github.com/dnstap/golang-dnstap/FrameStreamSockInput.go b/vendor/github.com/dnstap/golang-dnstap/FrameStreamSockInput.go deleted file mode 100644 index 443e46cdb..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/FrameStreamSockInput.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2013-2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package dnstap - -import ( - "log" - "net" - "os" -) - -type FrameStreamSockInput struct { - wait chan bool - listener net.Listener -} - -func NewFrameStreamSockInput(listener net.Listener) (input *FrameStreamSockInput) { - input = new(FrameStreamSockInput) - input.listener = listener - return -} - -func NewFrameStreamSockInputFromPath(socketPath string) (input *FrameStreamSockInput, err error) { - os.Remove(socketPath) - listener, err := net.Listen("unix", socketPath) - if err != nil { - return - } - return NewFrameStreamSockInput(listener), nil -} - -func (input *FrameStreamSockInput) ReadInto(output chan []byte) { - for { - conn, err := input.listener.Accept() - if err != nil { - log.Printf("net.Listener.Accept() failed: %s\n", err) - continue - } - i, err := NewFrameStreamInput(conn, true) - if err != nil { - log.Printf("dnstap.NewFrameStreamInput() failed: %s\n", err) - continue - } - log.Printf("dnstap.FrameStreamSockInput: accepted a socket connection\n") - go i.ReadInto(output) - } -} - -func (input *FrameStreamSockInput) Wait() { - select {} -} diff --git a/vendor/github.com/dnstap/golang-dnstap/LICENSE b/vendor/github.com/dnstap/golang-dnstap/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/dnstap/golang-dnstap/QuietTextFormat.go b/vendor/github.com/dnstap/golang-dnstap/QuietTextFormat.go deleted file mode 100644 index 69e6fe8d7..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/QuietTextFormat.go +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (c) 2013-2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package dnstap - -import ( - "bytes" - "fmt" - "net" - "strconv" - "time" - - "github.com/miekg/dns" -) - -const quietTimeFormat = "15:04:05" - -func textConvertTime(s *bytes.Buffer, secs *uint64, nsecs *uint32) { - if secs != nil { - s.WriteString(time.Unix(int64(*secs), 0).Format(quietTimeFormat)) - } else { - s.WriteString("??:??:??") - } - if nsecs != nil { - s.WriteString(fmt.Sprintf(".%06d", *nsecs/1000)) - } else { - s.WriteString(".??????") - } -} - -func textConvertIP(s *bytes.Buffer, ip []byte) { - if ip != nil { - s.WriteString(net.IP(ip).String()) - } else { - s.WriteString("MISSING_ADDRESS") - } -} - -func textConvertMessage(m *Message, s *bytes.Buffer) { - isQuery := false - printQueryAddress := false - - switch *m.Type { - case Message_CLIENT_QUERY, - Message_RESOLVER_QUERY, - Message_AUTH_QUERY, - Message_FORWARDER_QUERY, - Message_TOOL_QUERY: - isQuery = true - case Message_CLIENT_RESPONSE, - Message_RESOLVER_RESPONSE, - Message_AUTH_RESPONSE, - Message_FORWARDER_RESPONSE, - Message_TOOL_RESPONSE: - isQuery = false - default: - s.WriteString("[unhandled Message.Type]\n") - return - } - - if isQuery { - textConvertTime(s, m.QueryTimeSec, m.QueryTimeNsec) - } else { - textConvertTime(s, m.ResponseTimeSec, m.ResponseTimeNsec) - } - s.WriteString(" ") - - switch *m.Type { - case Message_CLIENT_QUERY, - Message_CLIENT_RESPONSE: - { - s.WriteString("C") - } - case Message_RESOLVER_QUERY, - Message_RESOLVER_RESPONSE: - { - s.WriteString("R") - } - case Message_AUTH_QUERY, - Message_AUTH_RESPONSE: - { - s.WriteString("A") - } - case Message_FORWARDER_QUERY, - Message_FORWARDER_RESPONSE: - { - s.WriteString("F") - } - case Message_STUB_QUERY, - Message_STUB_RESPONSE: - { - s.WriteString("S") - } - case Message_TOOL_QUERY, - Message_TOOL_RESPONSE: - { - s.WriteString("T") - } - } - - if isQuery { - s.WriteString("Q ") - } else { - s.WriteString("R ") - } - - switch *m.Type { - case Message_CLIENT_QUERY, - Message_CLIENT_RESPONSE, - Message_AUTH_QUERY, - Message_AUTH_RESPONSE: - printQueryAddress = true - } - - if printQueryAddress { - textConvertIP(s, m.QueryAddress) - } else { - textConvertIP(s, m.ResponseAddress) - } - s.WriteString(" ") - - if m.SocketProtocol != nil { - s.WriteString(m.SocketProtocol.String()) - } - s.WriteString(" ") - - var err error - msg := new(dns.Msg) - if isQuery { - s.WriteString(strconv.Itoa(len(m.QueryMessage))) - s.WriteString("b ") - err = msg.Unpack(m.QueryMessage) - } else { - s.WriteString(strconv.Itoa(len(m.ResponseMessage))) - s.WriteString("b ") - err = msg.Unpack(m.ResponseMessage) - } - - if err != nil { - s.WriteString("X ") - } else { - s.WriteString("\"" + msg.Question[0].Name + "\" ") - s.WriteString(dns.Class(msg.Question[0].Qclass).String() + " ") - s.WriteString(dns.Type(msg.Question[0].Qtype).String()) - } - - s.WriteString("\n") -} - -func TextFormat(dt *Dnstap) (out []byte, ok bool) { - var s bytes.Buffer - - if *dt.Type == Dnstap_MESSAGE { - textConvertMessage(dt.Message, &s) - return s.Bytes(), true - } - - return nil, false -} diff --git a/vendor/github.com/dnstap/golang-dnstap/README b/vendor/github.com/dnstap/golang-dnstap/README deleted file mode 100644 index e59effb8d..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/README +++ /dev/null @@ -1,15 +0,0 @@ -dnstap: flexible, structured event replication format for DNS servers ---------------------------------------------------------------------- - -dnstap implements an encoding format for DNS server events. 
It uses a -lightweight framing on top of event payloads encoded using Protocol Buffers and -is transport neutral. - -dnstap can represent internal state inside a DNS server that is difficult to -obtain using techniques based on traditional packet capture or unstructured -textual format logging. - -This repository contains a command-line tool named "dnstap" developed in the -Go programming language. It can be installed with the following command: - - go get -u github.com/dnstap/golang-dnstap/dnstap diff --git a/vendor/github.com/dnstap/golang-dnstap/TextOutput.go b/vendor/github.com/dnstap/golang-dnstap/TextOutput.go deleted file mode 100644 index 5fa2c00e0..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/TextOutput.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dnstap - -import ( - "bufio" - "io" - "log" - "os" - - "github.com/golang/protobuf/proto" -) - -type TextFormatFunc func(*Dnstap) ([]byte, bool) - -type TextOutput struct { - format TextFormatFunc - outputChannel chan []byte - wait chan bool - writer *bufio.Writer -} - -func NewTextOutput(writer io.Writer, format TextFormatFunc) (o *TextOutput) { - o = new(TextOutput) - o.format = format - o.outputChannel = make(chan []byte, outputChannelSize) - o.writer = bufio.NewWriter(writer) - o.wait = make(chan bool) - return -} - -func NewTextOutputFromFilename(fname string, format TextFormatFunc) (o *TextOutput, err error) { - if fname == "" || fname == "-" { - return NewTextOutput(os.Stdout, format), nil - } - writer, err := os.Create(fname) - if err != nil { - return - } - return NewTextOutput(writer, format), nil -} - -func (o *TextOutput) GetOutputChannel() chan []byte { - return o.outputChannel -} - -func (o *TextOutput) RunOutputLoop() { - dt := &Dnstap{} - for frame := range o.outputChannel { - if err := proto.Unmarshal(frame, dt); err != nil { - log.Fatalf("dnstap.TextOutput: proto.Unmarshal() failed: %s\n", err) - break - } - buf, ok := o.format(dt) - if !ok { - log.Fatalf("dnstap.TextOutput: text format function failed\n") - break - } - if _, err := o.writer.Write(buf); err != nil { - log.Fatalf("dnstap.TextOutput: write failed: %s\n", err) - break - } - o.writer.Flush() - } - close(o.wait) -} - -func (o *TextOutput) Close() { - close(o.outputChannel) - <-o.wait - o.writer.Flush() -} diff --git a/vendor/github.com/dnstap/golang-dnstap/YamlFormat.go b/vendor/github.com/dnstap/golang-dnstap/YamlFormat.go deleted file mode 100644 index c63a4a3a1..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/YamlFormat.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2013-2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dnstap - -import ( - "bytes" - "fmt" - "net" - "strconv" - "strings" - "time" - - "github.com/miekg/dns" -) - -const yamlTimeFormat = "2006-01-02 15:04:05.999999999" - -func yamlConvertMessage(m *Message, s *bytes.Buffer) { - s.WriteString(fmt.Sprint(" type: ", m.Type, "\n")) - - if m.QueryTimeSec != nil && m.QueryTimeNsec != nil { - t := time.Unix(int64(*m.QueryTimeSec), int64(*m.QueryTimeNsec)).UTC() - s.WriteString(fmt.Sprint(" query_time: !!timestamp ", t.Format(yamlTimeFormat), "\n")) - } - - if m.ResponseTimeSec != nil && m.ResponseTimeNsec != nil { - t := time.Unix(int64(*m.ResponseTimeSec), int64(*m.ResponseTimeNsec)).UTC() - s.WriteString(fmt.Sprint(" response_time: !!timestamp ", t.Format(yamlTimeFormat), "\n")) - } - - if m.SocketFamily != nil { - s.WriteString(fmt.Sprint(" socket_family: ", m.SocketFamily, "\n")) - } - - if m.SocketProtocol != nil { - s.WriteString(fmt.Sprint(" socket_protocol: ", m.SocketProtocol, "\n")) - } - - if m.QueryAddress != nil { - s.WriteString(fmt.Sprint(" query_address: ", net.IP(m.QueryAddress), "\n")) - } - - if m.ResponseAddress != nil { - s.WriteString(fmt.Sprint(" response_address: ", net.IP(m.ResponseAddress), "\n")) - } - - if m.QueryPort != nil { - s.WriteString(fmt.Sprint(" query_port: ", *m.QueryPort, "\n")) - } - - if m.ResponsePort != nil { - s.WriteString(fmt.Sprint(" response_port: ", *m.ResponsePort, "\n")) - } - - if m.QueryZone != nil { - name, _, err := dns.UnpackDomainName(m.QueryZone, 0) - if err != nil { - s.WriteString(" # query_zone: parse failed\n") - } else { - s.WriteString(fmt.Sprint(" query_zone: ", strconv.Quote(name), "\n")) - } - } - - if m.QueryMessage != nil { - msg := new(dns.Msg) - err := msg.Unpack(m.QueryMessage) - if err != nil { - s.WriteString(" # query_message: parse failed\n") - } else { - s.WriteString(" query_message: |\n") - s.WriteString(" " + strings.Replace(strings.TrimSpace(msg.String()), "\n", "\n ", -1) + "\n") - } - } - if m.ResponseMessage != nil { - msg := new(dns.Msg) - err := msg.Unpack(m.ResponseMessage) - if err != nil { - s.WriteString(fmt.Sprint(" # response_message: parse failed: ", err, "\n")) - } else { - s.WriteString(" response_message: |\n") - s.WriteString(" " + strings.Replace(strings.TrimSpace(msg.String()), "\n", "\n ", -1) + "\n") - } - } - s.WriteString("---\n") -} - -func YamlFormat(dt *Dnstap) (out []byte, ok bool) { - var s bytes.Buffer - - s.WriteString(fmt.Sprint("type: ", dt.Type, "\n")) - if dt.Identity != nil { - s.WriteString(fmt.Sprint("identity: ", strconv.Quote(string(dt.Identity)), "\n")) - } - if dt.Version != nil { - s.WriteString(fmt.Sprint("version: ", strconv.Quote(string(dt.Version)), "\n")) - } - if *dt.Type == Dnstap_MESSAGE { - s.WriteString("message:\n") - yamlConvertMessage(dt.Message, &s) - } - return s.Bytes(), true -} diff --git a/vendor/github.com/dnstap/golang-dnstap/debian/copyright b/vendor/github.com/dnstap/golang-dnstap/debian/copyright deleted file mode 100644 index fdeb5b208..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/debian/copyright +++ /dev/null @@ -1,26 +0,0 @@ -Format: 
https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: golang-dnstap -Source: - -Files: * -Copyright: Copyright (c) 2013, 2014, 2016, 2017 by Farsight Security, Inc. -License: Apache-2.0 - -Files: debian/* -Copyright: 2018 Farsight Security, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - https://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". - diff --git a/vendor/github.com/dnstap/golang-dnstap/dnstap.go b/vendor/github.com/dnstap/golang-dnstap/dnstap.go deleted file mode 100644 index d4f786ae7..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/dnstap.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dnstap - -const outputChannelSize = 32 - -var FSContentType = []byte("protobuf:dnstap.Dnstap") - -type Input interface { - ReadInto(chan []byte) - Wait() -} - -type Output interface { - GetOutputChannel() chan []byte - RunOutputLoop() - Close() -} diff --git a/vendor/github.com/dnstap/golang-dnstap/dnstap.pb.go b/vendor/github.com/dnstap/golang-dnstap/dnstap.pb.go deleted file mode 100644 index 43b5fd10a..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/dnstap.pb.go +++ /dev/null @@ -1,448 +0,0 @@ -// Code generated by protoc-gen-go. -// source: dnstap.proto -// DO NOT EDIT! - -/* -Package dnstap is a generated protocol buffer package. - -It is generated from these files: - dnstap.proto - -It has these top-level messages: - Dnstap - Message -*/ -package dnstap - -import proto "github.com/golang/protobuf/proto" -import json "encoding/json" -import math "math" - -// Reference proto, json, and math imports to suppress error if they are not otherwise used. -var _ = proto.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -// SocketFamily: the network protocol family of a socket. This specifies how -// to interpret "network address" fields. 
-type SocketFamily int32 - -const ( - SocketFamily_INET SocketFamily = 1 - SocketFamily_INET6 SocketFamily = 2 -) - -var SocketFamily_name = map[int32]string{ - 1: "INET", - 2: "INET6", -} -var SocketFamily_value = map[string]int32{ - "INET": 1, - "INET6": 2, -} - -func (x SocketFamily) Enum() *SocketFamily { - p := new(SocketFamily) - *p = x - return p -} -func (x SocketFamily) String() string { - return proto.EnumName(SocketFamily_name, int32(x)) -} -func (x *SocketFamily) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketFamily_value, data, "SocketFamily") - if err != nil { - return err - } - *x = SocketFamily(value) - return nil -} - -// SocketProtocol: the transport protocol of a socket. This specifies how to -// interpret "transport port" fields. -type SocketProtocol int32 - -const ( - SocketProtocol_UDP SocketProtocol = 1 - SocketProtocol_TCP SocketProtocol = 2 -) - -var SocketProtocol_name = map[int32]string{ - 1: "UDP", - 2: "TCP", -} -var SocketProtocol_value = map[string]int32{ - "UDP": 1, - "TCP": 2, -} - -func (x SocketProtocol) Enum() *SocketProtocol { - p := new(SocketProtocol) - *p = x - return p -} -func (x SocketProtocol) String() string { - return proto.EnumName(SocketProtocol_name, int32(x)) -} -func (x *SocketProtocol) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketProtocol_value, data, "SocketProtocol") - if err != nil { - return err - } - *x = SocketProtocol(value) - return nil -} - -// Identifies which field below is filled in. -type Dnstap_Type int32 - -const ( - Dnstap_MESSAGE Dnstap_Type = 1 -) - -var Dnstap_Type_name = map[int32]string{ - 1: "MESSAGE", -} -var Dnstap_Type_value = map[string]int32{ - "MESSAGE": 1, -} - -func (x Dnstap_Type) Enum() *Dnstap_Type { - p := new(Dnstap_Type) - *p = x - return p -} -func (x Dnstap_Type) String() string { - return proto.EnumName(Dnstap_Type_name, int32(x)) -} -func (x *Dnstap_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Dnstap_Type_value, data, "Dnstap_Type") - if err != nil { - return err - } - *x = Dnstap_Type(value) - return nil -} - -type Message_Type int32 - -const ( - // AUTH_QUERY is a DNS query message received from a resolver by an - // authoritative name server, from the perspective of the authorative - // name server. - Message_AUTH_QUERY Message_Type = 1 - // AUTH_RESPONSE is a DNS response message sent from an authoritative - // name server to a resolver, from the perspective of the authoritative - // name server. - Message_AUTH_RESPONSE Message_Type = 2 - // RESOLVER_QUERY is a DNS query message sent from a resolver to an - // authoritative name server, from the perspective of the resolver. - // Resolvers typically clear the RD (recursion desired) bit when - // sending queries. - Message_RESOLVER_QUERY Message_Type = 3 - // RESOLVER_RESPONSE is a DNS response message received from an - // authoritative name server by a resolver, from the perspective of - // the resolver. - Message_RESOLVER_RESPONSE Message_Type = 4 - // CLIENT_QUERY is a DNS query message sent from a client to a DNS - // server which is expected to perform further recursion, from the - // perspective of the DNS server. The client may be a stub resolver or - // forwarder or some other type of software which typically sets the RD - // (recursion desired) bit when querying the DNS server. The DNS server - // may be a simple forwarding proxy or it may be a full recursive - // resolver. 
- Message_CLIENT_QUERY Message_Type = 5 - // CLIENT_RESPONSE is a DNS response message sent from a DNS server to - // a client, from the perspective of the DNS server. The DNS server - // typically sets the RA (recursion available) bit when responding. - Message_CLIENT_RESPONSE Message_Type = 6 - // FORWARDER_QUERY is a DNS query message sent from a downstream DNS - // server to an upstream DNS server which is expected to perform - // further recursion, from the perspective of the downstream DNS - // server. - Message_FORWARDER_QUERY Message_Type = 7 - // FORWARDER_RESPONSE is a DNS response message sent from an upstream - // DNS server performing recursion to a downstream DNS server, from the - // perspective of the downstream DNS server. - Message_FORWARDER_RESPONSE Message_Type = 8 - // STUB_QUERY is a DNS query message sent from a stub resolver to a DNS - // server, from the perspective of the stub resolver. - Message_STUB_QUERY Message_Type = 9 - // STUB_RESPONSE is a DNS response message sent from a DNS server to a - // stub resolver, from the perspective of the stub resolver. - Message_STUB_RESPONSE Message_Type = 10 - // TOOL_QUERY is a DNS query message sent from a DNS software tool to a - // DNS server, from the perspective of the tool. - Message_TOOL_QUERY Message_Type = 11 - // TOOL_RESPONSE is a DNS response message received by a DNS software - // tool from a DNS server, from the perspective of the tool. - Message_TOOL_RESPONSE Message_Type = 12 -) - -var Message_Type_name = map[int32]string{ - 1: "AUTH_QUERY", - 2: "AUTH_RESPONSE", - 3: "RESOLVER_QUERY", - 4: "RESOLVER_RESPONSE", - 5: "CLIENT_QUERY", - 6: "CLIENT_RESPONSE", - 7: "FORWARDER_QUERY", - 8: "FORWARDER_RESPONSE", - 9: "STUB_QUERY", - 10: "STUB_RESPONSE", - 11: "TOOL_QUERY", - 12: "TOOL_RESPONSE", -} -var Message_Type_value = map[string]int32{ - "AUTH_QUERY": 1, - "AUTH_RESPONSE": 2, - "RESOLVER_QUERY": 3, - "RESOLVER_RESPONSE": 4, - "CLIENT_QUERY": 5, - "CLIENT_RESPONSE": 6, - "FORWARDER_QUERY": 7, - "FORWARDER_RESPONSE": 8, - "STUB_QUERY": 9, - "STUB_RESPONSE": 10, - "TOOL_QUERY": 11, - "TOOL_RESPONSE": 12, -} - -func (x Message_Type) Enum() *Message_Type { - p := new(Message_Type) - *p = x - return p -} -func (x Message_Type) String() string { - return proto.EnumName(Message_Type_name, int32(x)) -} -func (x *Message_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Message_Type_value, data, "Message_Type") - if err != nil { - return err - } - *x = Message_Type(value) - return nil -} - -// "Dnstap": this is the top-level dnstap type, which is a "union" type that -// contains other kinds of dnstap payloads, although currently only one type -// of dnstap payload is defined. -// See: https://developers.google.com/protocol-buffers/docs/techniques#union -type Dnstap struct { - // DNS server identity. - // If enabled, this is the identity string of the DNS server which generated - // this message. Typically this would be the same string as returned by an - // "NSID" (RFC 5001) query. - Identity []byte `protobuf:"bytes,1,opt,name=identity" json:"identity,omitempty"` - // DNS server version. - // If enabled, this is the version string of the DNS server which generated - // this message. Typically this would be the same string as returned by a - // "version.bind" query. - Version []byte `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - // Extra data for this payload. - // This field can be used for adding an arbitrary byte-string annotation to - // the payload. 
No encoding or interpretation is applied or enforced. - Extra []byte `protobuf:"bytes,3,opt,name=extra" json:"extra,omitempty"` - Type *Dnstap_Type `protobuf:"varint,15,req,name=type,enum=dnstap.Dnstap_Type" json:"type,omitempty"` - // One of the following will be filled in. - Message *Message `protobuf:"bytes,14,opt,name=message" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Dnstap) Reset() { *m = Dnstap{} } -func (m *Dnstap) String() string { return proto.CompactTextString(m) } -func (*Dnstap) ProtoMessage() {} - -func (m *Dnstap) GetIdentity() []byte { - if m != nil { - return m.Identity - } - return nil -} - -func (m *Dnstap) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -func (m *Dnstap) GetExtra() []byte { - if m != nil { - return m.Extra - } - return nil -} - -func (m *Dnstap) GetType() Dnstap_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return Dnstap_MESSAGE -} - -func (m *Dnstap) GetMessage() *Message { - if m != nil { - return m.Message - } - return nil -} - -// Message: a wire-format (RFC 1035 section 4) DNS message and associated -// metadata. Applications generating "Message" payloads should follow -// certain requirements based on the MessageType, see below. -type Message struct { - // One of the Type values described above. - Type *Message_Type `protobuf:"varint,1,req,name=type,enum=dnstap.Message_Type" json:"type,omitempty"` - // One of the SocketFamily values described above. - SocketFamily *SocketFamily `protobuf:"varint,2,opt,name=socket_family,enum=dnstap.SocketFamily" json:"socket_family,omitempty"` - // One of the SocketProtocol values described above. - SocketProtocol *SocketProtocol `protobuf:"varint,3,opt,name=socket_protocol,enum=dnstap.SocketProtocol" json:"socket_protocol,omitempty"` - // The network address of the message initiator. - // For SocketFamily INET, this field is 4 octets (IPv4 address). - // For SocketFamily INET6, this field is 16 octets (IPv6 address). - QueryAddress []byte `protobuf:"bytes,4,opt,name=query_address" json:"query_address,omitempty"` - // The network address of the message responder. - // For SocketFamily INET, this field is 4 octets (IPv4 address). - // For SocketFamily INET6, this field is 16 octets (IPv6 address). - ResponseAddress []byte `protobuf:"bytes,5,opt,name=response_address" json:"response_address,omitempty"` - // The transport port of the message initiator. - // This is a 16-bit UDP or TCP port number, depending on SocketProtocol. - QueryPort *uint32 `protobuf:"varint,6,opt,name=query_port" json:"query_port,omitempty"` - // The transport port of the message responder. - // This is a 16-bit UDP or TCP port number, depending on SocketProtocol. - ResponsePort *uint32 `protobuf:"varint,7,opt,name=response_port" json:"response_port,omitempty"` - // The time at which the DNS query message was sent or received, depending - // on whether this is an AUTH_QUERY, RESOLVER_QUERY, or CLIENT_QUERY. - // This is the number of seconds since the UNIX epoch. - QueryTimeSec *uint64 `protobuf:"varint,8,opt,name=query_time_sec" json:"query_time_sec,omitempty"` - // The time at which the DNS query message was sent or received. - // This is the seconds fraction, expressed as a count of nanoseconds. - QueryTimeNsec *uint32 `protobuf:"fixed32,9,opt,name=query_time_nsec" json:"query_time_nsec,omitempty"` - // The initiator's original wire-format DNS query message, verbatim. 
- QueryMessage []byte `protobuf:"bytes,10,opt,name=query_message" json:"query_message,omitempty"` - // The "zone" or "bailiwick" pertaining to the DNS query message. - // This is a wire-format DNS domain name. - QueryZone []byte `protobuf:"bytes,11,opt,name=query_zone" json:"query_zone,omitempty"` - // The time at which the DNS response message was sent or received, - // depending on whether this is an AUTH_RESPONSE, RESOLVER_RESPONSE, or - // CLIENT_RESPONSE. - // This is the number of seconds since the UNIX epoch. - ResponseTimeSec *uint64 `protobuf:"varint,12,opt,name=response_time_sec" json:"response_time_sec,omitempty"` - // The time at which the DNS response message was sent or received. - // This is the seconds fraction, expressed as a count of nanoseconds. - ResponseTimeNsec *uint32 `protobuf:"fixed32,13,opt,name=response_time_nsec" json:"response_time_nsec,omitempty"` - // The responder's original wire-format DNS response message, verbatim. - ResponseMessage []byte `protobuf:"bytes,14,opt,name=response_message" json:"response_message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} - -func (m *Message) GetType() Message_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return Message_AUTH_QUERY -} - -func (m *Message) GetSocketFamily() SocketFamily { - if m != nil && m.SocketFamily != nil { - return *m.SocketFamily - } - return SocketFamily_INET -} - -func (m *Message) GetSocketProtocol() SocketProtocol { - if m != nil && m.SocketProtocol != nil { - return *m.SocketProtocol - } - return SocketProtocol_UDP -} - -func (m *Message) GetQueryAddress() []byte { - if m != nil { - return m.QueryAddress - } - return nil -} - -func (m *Message) GetResponseAddress() []byte { - if m != nil { - return m.ResponseAddress - } - return nil -} - -func (m *Message) GetQueryPort() uint32 { - if m != nil && m.QueryPort != nil { - return *m.QueryPort - } - return 0 -} - -func (m *Message) GetResponsePort() uint32 { - if m != nil && m.ResponsePort != nil { - return *m.ResponsePort - } - return 0 -} - -func (m *Message) GetQueryTimeSec() uint64 { - if m != nil && m.QueryTimeSec != nil { - return *m.QueryTimeSec - } - return 0 -} - -func (m *Message) GetQueryTimeNsec() uint32 { - if m != nil && m.QueryTimeNsec != nil { - return *m.QueryTimeNsec - } - return 0 -} - -func (m *Message) GetQueryMessage() []byte { - if m != nil { - return m.QueryMessage - } - return nil -} - -func (m *Message) GetQueryZone() []byte { - if m != nil { - return m.QueryZone - } - return nil -} - -func (m *Message) GetResponseTimeSec() uint64 { - if m != nil && m.ResponseTimeSec != nil { - return *m.ResponseTimeSec - } - return 0 -} - -func (m *Message) GetResponseTimeNsec() uint32 { - if m != nil && m.ResponseTimeNsec != nil { - return *m.ResponseTimeNsec - } - return 0 -} - -func (m *Message) GetResponseMessage() []byte { - if m != nil { - return m.ResponseMessage - } - return nil -} - -func init() { - proto.RegisterEnum("dnstap.SocketFamily", SocketFamily_name, SocketFamily_value) - proto.RegisterEnum("dnstap.SocketProtocol", SocketProtocol_name, SocketProtocol_value) - proto.RegisterEnum("dnstap.Dnstap_Type", Dnstap_Type_name, Dnstap_Type_value) - proto.RegisterEnum("dnstap.Message_Type", Message_Type_name, Message_Type_value) -} diff --git a/vendor/github.com/dnstap/golang-dnstap/dnstap.pb/LICENSE b/vendor/github.com/dnstap/golang-dnstap/dnstap.pb/LICENSE 
deleted file mode 100644 index 0e259d42c..000000000 --- a/vendor/github.com/dnstap/golang-dnstap/dnstap.pb/LICENSE +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. 
- Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/vendor/github.com/farsightsec/golang-framestream/.gitignore b/vendor/github.com/farsightsec/golang-framestream/.gitignore deleted file mode 100644 index 9e8224b7f..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.*swp diff --git a/vendor/github.com/farsightsec/golang-framestream/COPYRIGHT b/vendor/github.com/farsightsec/golang-framestream/COPYRIGHT deleted file mode 100644 index a11f18efa..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/COPYRIGHT +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2014 by Farsight Security, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/farsightsec/golang-framestream/Control.go b/vendor/github.com/farsightsec/golang-framestream/Control.go deleted file mode 100644 index 6345b2c26..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/Control.go +++ /dev/null @@ -1,156 +0,0 @@ -package framestream - -import ( - "bufio" - "bytes" - "encoding/binary" - "io" -) - -const CONTROL_ACCEPT = 0x01 -const CONTROL_START = 0x02 -const CONTROL_STOP = 0x03 -const CONTROL_READY = 0x04 -const CONTROL_FINISH = 0x05 - -const CONTROL_FIELD_CONTENT_TYPE = 0x01 - -type ControlFrame struct { - ControlType uint32 - ContentTypes [][]byte -} - -var ControlStart = ControlFrame{ControlType: CONTROL_START} -var ControlStop = ControlFrame{ControlType: CONTROL_STOP} -var ControlReady = ControlFrame{ControlType: CONTROL_READY} -var ControlAccept = ControlFrame{ControlType: CONTROL_ACCEPT} -var ControlFinish = ControlFrame{ControlType: CONTROL_FINISH} - -func (c *ControlFrame) Encode(w io.Writer) (err error) { - var buf bytes.Buffer - err = binary.Write(&buf, binary.BigEndian, c.ControlType) - if err != nil { - return - } - for _, ctype := range c.ContentTypes { - err = binary.Write(&buf, binary.BigEndian, uint32(CONTROL_FIELD_CONTENT_TYPE)) - if err != nil { - return - } - - err = binary.Write(&buf, binary.BigEndian, uint32(len(ctype))) - if err != nil { - return - } - - _, err = buf.Write(ctype) - if err != nil { - return - } - } - - err = binary.Write(w, binary.BigEndian, uint32(0)) - if err != nil { - return - } - err = binary.Write(w, binary.BigEndian, uint32(buf.Len())) - if err != nil { - return - } - _, err = buf.WriteTo(w) - return -} - -func (c *ControlFrame) EncodeFlush(w *bufio.Writer) error { - if err := c.Encode(w); err != nil { - return err - } - return w.Flush() -} - -func (c *ControlFrame) Decode(r io.Reader) (err error) { - var cflen uint32 - err = binary.Read(r, binary.BigEndian, &cflen) - if err != nil { - return - } - - err = binary.Read(r, binary.BigEndian, &c.ControlType) - if err != nil { - return - } - - cflen -= 4 - if cflen > 0 { - cfields 
:= make([]byte, int(cflen)) - _, err = io.ReadFull(r, cfields) - if err != nil { - return - } - - for len(cfields) > 8 { - cftype := binary.BigEndian.Uint32(cfields[:4]) - cfields = cfields[4:] - if cftype != CONTROL_FIELD_CONTENT_TYPE { - return ErrDecode - } - - cflen := int(binary.BigEndian.Uint32(cfields[:4])) - cfields = cfields[4:] - if cflen > len(cfields) { - return ErrDecode - } - - c.ContentTypes = append(c.ContentTypes, cfields[:cflen]) - cfields = cfields[cflen:] - } - - if len(cfields) > 0 { - return ErrDecode - } - } - return -} - -func (c *ControlFrame) DecodeEscape(r io.Reader) error { - var zero uint32 - err := binary.Read(r, binary.BigEndian, &zero) - if err != nil { - return err - } - if zero != 0 { - return ErrDecode - } - return c.Decode(r) -} - -func (c *ControlFrame) DecodeTypeEscape(r io.Reader, ctype uint32) error { - err := c.DecodeEscape(r) - if err != nil { - return err - } - - if ctype != c.ControlType { - return ErrDecode - } - - return nil -} - -func (c *ControlFrame) MatchContentType(ctype []byte) bool { - if ctype == nil { - return true - } - for _, cfctype := range c.ContentTypes { - if bytes.Compare(ctype, cfctype) == 0 { - return true - } - } - return false -} - -func (c *ControlFrame) SetContentType(ctype []byte) { - if ctype != nil { - c.ContentTypes = [][]byte{ctype} - } -} diff --git a/vendor/github.com/farsightsec/golang-framestream/Decoder.go b/vendor/github.com/farsightsec/golang-framestream/Decoder.go deleted file mode 100644 index 1ee6296fa..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/Decoder.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (c) 2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package framestream - -import ( - "bufio" - "encoding/binary" - "io" - "time" -) - -type DecoderOptions struct { - MaxPayloadSize uint32 - ContentType []byte - Bidirectional bool - Timeout time.Duration -} - -type Decoder struct { - buf []byte - opt DecoderOptions - reader *bufio.Reader - writer *bufio.Writer - stopped bool -} - -func NewDecoder(r io.Reader, opt *DecoderOptions) (dec *Decoder, err error) { - if opt == nil { - opt = &DecoderOptions{} - } - if opt.MaxPayloadSize == 0 { - opt.MaxPayloadSize = DEFAULT_MAX_PAYLOAD_SIZE - } - r = timeoutReader(r, opt) - dec = &Decoder{ - buf: make([]byte, opt.MaxPayloadSize), - opt: *opt, - reader: bufio.NewReader(r), - writer: nil, - } - - var cf ControlFrame - if opt.Bidirectional { - w, ok := r.(io.Writer) - if !ok { - return dec, ErrType - } - dec.writer = bufio.NewWriter(w) - - // Read the ready control frame. - err = cf.DecodeTypeEscape(dec.reader, CONTROL_READY) - if err != nil { - return - } - - // Check content type. - if !cf.MatchContentType(dec.opt.ContentType) { - return dec, ErrContentTypeMismatch - } - - // Send the accept control frame. - accept := ControlAccept - accept.SetContentType(dec.opt.ContentType) - err = accept.EncodeFlush(dec.writer) - if err != nil { - return - } - } - - // Read the start control frame. 
- err = cf.DecodeTypeEscape(dec.reader, CONTROL_START) - if err != nil { - return - } - - // Disable the read timeout to prevent killing idle connections. - disableReadTimeout(r) - - // Check content type. - if !cf.MatchContentType(dec.opt.ContentType) { - return dec, ErrContentTypeMismatch - } - - return -} - -func (dec *Decoder) readFrame(frameLen uint32) (err error) { - // Enforce limits on frame size. - if frameLen > dec.opt.MaxPayloadSize { - err = ErrDataFrameTooLarge - return - } - - // Read the frame. - n, err := io.ReadFull(dec.reader, dec.buf[0:frameLen]) - if err != nil || uint32(n) != frameLen { - return - } - return -} - -func (dec *Decoder) Decode() (frameData []byte, err error) { - if dec.stopped { - err = EOF - return - } - - // Read the frame length. - var frameLen uint32 - err = binary.Read(dec.reader, binary.BigEndian, &frameLen) - if err != nil { - return - } - if frameLen == 0 { - // This is a control frame. - var cf ControlFrame - err = cf.Decode(dec.reader) - if err != nil { - return - } - if cf.ControlType == CONTROL_STOP { - dec.stopped = true - if dec.opt.Bidirectional { - ff := &ControlFrame{ControlType: CONTROL_FINISH} - err = ff.EncodeFlush(dec.writer) - if err != nil { - return - } - } - return nil, EOF - } - if err != nil { - return nil, err - } - - } else { - // This is a data frame. - err = dec.readFrame(frameLen) - if err != nil { - return - } - frameData = dec.buf[0:frameLen] - } - - return -} diff --git a/vendor/github.com/farsightsec/golang-framestream/Encoder.go b/vendor/github.com/farsightsec/golang-framestream/Encoder.go deleted file mode 100644 index 878751d3f..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/Encoder.go +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package framestream - -import ( - "bufio" - "encoding/binary" - "io" - "time" -) - -type EncoderOptions struct { - ContentType []byte - Bidirectional bool - Timeout time.Duration -} - -type Encoder struct { - writer *bufio.Writer - reader *bufio.Reader - opt EncoderOptions - buf []byte -} - -func NewEncoder(w io.Writer, opt *EncoderOptions) (enc *Encoder, err error) { - if opt == nil { - opt = &EncoderOptions{} - } - w = timeoutWriter(w, opt) - enc = &Encoder{ - writer: bufio.NewWriter(w), - opt: *opt, - } - - if opt.Bidirectional { - r, ok := w.(io.Reader) - if !ok { - return nil, ErrType - } - enc.reader = bufio.NewReader(r) - ready := ControlReady - ready.SetContentType(opt.ContentType) - if err = ready.EncodeFlush(enc.writer); err != nil { - return - } - - var accept ControlFrame - if err = accept.DecodeTypeEscape(enc.reader, CONTROL_ACCEPT); err != nil { - return - } - - if !accept.MatchContentType(opt.ContentType) { - return nil, ErrContentTypeMismatch - } - } - - // Write the start control frame. 
- start := ControlStart - start.SetContentType(opt.ContentType) - err = start.EncodeFlush(enc.writer) - if err != nil { - return - } - - return -} - -func (enc *Encoder) Close() (err error) { - err = ControlStop.EncodeFlush(enc.writer) - if err != nil || !enc.opt.Bidirectional { - return - } - - var finish ControlFrame - return finish.DecodeTypeEscape(enc.reader, CONTROL_FINISH) -} - -func (enc *Encoder) Write(frame []byte) (n int, err error) { - err = binary.Write(enc.writer, binary.BigEndian, uint32(len(frame))) - if err != nil { - return - } - return enc.writer.Write(frame) -} - -func (enc *Encoder) Flush() error { - return enc.writer.Flush() -} diff --git a/vendor/github.com/farsightsec/golang-framestream/LICENSE b/vendor/github.com/farsightsec/golang-framestream/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/farsightsec/golang-framestream/README.md b/vendor/github.com/farsightsec/golang-framestream/README.md deleted file mode 100644 index 9d1fd76dc..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Frame Streams implementation in Go - -https://github.com/farsightsec/golang-framestream - -Frame Streams is a lightweight, binary-clean protocol that allows -for the transport of arbitrarily encoded data payload sequences with -minimal framing overhead. - -This package provides a pure Golang implementation. The Frame Streams -implementation in C is at https://github.com/farsightsec/fstrm/. - -The example framestream_dump program reads a Frame Streams formatted -input file and prints the data frames and frame byte counts. diff --git a/vendor/github.com/farsightsec/golang-framestream/framestream.go b/vendor/github.com/farsightsec/golang-framestream/framestream.go deleted file mode 100644 index 49656981c..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/framestream.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2014 by Farsight Security, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package framestream - -import ( - "errors" - "io" -) - -const DEFAULT_MAX_PAYLOAD_SIZE = 1048576 -const MAX_CONTROL_FRAME_SIZE = 512 - -var EOF = io.EOF -var ErrContentTypeMismatch = errors.New("content type mismatch") -var ErrDataFrameTooLarge = errors.New("data frame too large") -var ErrShortRead = errors.New("short read") -var ErrDecode = errors.New("decoding error") -var ErrType = errors.New("invalid type") diff --git a/vendor/github.com/farsightsec/golang-framestream/timeout.go b/vendor/github.com/farsightsec/golang-framestream/timeout.go deleted file mode 100644 index ce3d69e1f..000000000 --- a/vendor/github.com/farsightsec/golang-framestream/timeout.go +++ /dev/null @@ -1,67 +0,0 @@ -package framestream - -import ( - "io" - "net" - "time" -) - -type timeoutConn struct { - conn net.Conn - readTimeout, writeTimeout time.Duration -} - -func (toc *timeoutConn) Write(b []byte) (int, error) { - if toc.writeTimeout != 0 { - toc.conn.SetWriteDeadline(time.Now().Add(toc.writeTimeout)) - } - return toc.conn.Write(b) -} - -func (toc *timeoutConn) Read(b []byte) (int, error) { - if toc.readTimeout != 0 { - toc.conn.SetReadDeadline(time.Now().Add(toc.readTimeout)) - } - return toc.conn.Read(b) -} - -func timeoutWriter(w io.Writer, opt *EncoderOptions) io.Writer { - if !opt.Bidirectional { - return w - } - if opt.Timeout == 0 { - return w - } - if c, ok := w.(net.Conn); ok { - return &timeoutConn{ - conn: c, - readTimeout: opt.Timeout, - writeTimeout: opt.Timeout, - } - } - return w -} - -func timeoutReader(r io.Reader, opt *DecoderOptions) io.Reader { - if !opt.Bidirectional { - return r - } - if opt.Timeout == 0 { - return r - } - if c, ok := r.(net.Conn); ok { - return &timeoutConn{ - conn: c, - readTimeout: opt.Timeout, - writeTimeout: opt.Timeout, - } - } - return r -} - -func disableReadTimeout(r io.Reader) { - if tc, ok := r.(*timeoutConn); ok { - tc.readTimeout = 0 - tc.conn.SetReadDeadline(time.Time{}) - } -} diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md new file mode 100644 index 000000000..c5275d5ab --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md @@ -0,0 +1,18 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Bernerd Schaefer +* Björn Rabenstein +* Daniel Bornkessel +* Jeff Younker +* Julius Volz +* Matt T. Proud +* Tobias Schmidt + diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index c0d70b2fa..623d3d83f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -29,72 +29,27 @@ type Collector interface { // collected by this Collector to the provided channel and returns once // the last descriptor has been sent. The sent descriptors fulfill the // consistency and uniqueness requirements described in the Desc - // documentation. - // - // It is valid if one and the same Collector sends duplicate - // descriptors. Those duplicates are simply ignored. However, two - // different Collectors must not send duplicate descriptors. - // - // Sending no descriptor at all marks the Collector as “unchecked”, - // i.e. 
no checks will be performed at registration time, and the - // Collector may yield any Metric it sees fit in its Collect method. - // - // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. It may be called concurrently and - // therefore must be implemented in a concurrency safe way. - // - // If a Collector encounters an error while executing this method, it - // must send an invalid descriptor (created with NewInvalidDesc) to - // signal the error to the registry. + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. Describe(chan<- *Desc) // Collect is called by the Prometheus registry when collecting // metrics. The implementation sends each collected metric via the // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by Describe - // (unless the Collector is unchecked, see above). Returned metrics that - // share the same descriptor must differ in their variable label - // values. - // - // This method may be called concurrently and must therefore be - // implemented in a concurrency safe way. Blocking occurs at the expense - // of total performance of rendering all registered metrics. Ideally, - // Collector implementations support concurrent readers. + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. Collect(chan<- Metric) } -// DescribeByCollect is a helper to implement the Describe method of a custom -// Collector. It collects the metrics from the provided Collector and sends -// their descriptors to the provided channel. -// -// If a Collector collects the same metrics throughout its lifetime, its -// Describe method can simply be implemented as: -// -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } -// -// However, this will not work if the metrics collected change dynamically over -// the lifetime of the Collector in a way that their combined set of descriptors -// changes as well. The shortcut implementation will then violate the contract -// of the Describe method. If a Collector sometimes collects no metrics at all -// (for example vectors like CounterVec, GaugeVec, etc., which only collect -// metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collecter (cf. the Register -// method of the Registerer interface). Hence, only use this shortcut -// implementation of Describe if you are certain to fulfill the contract. -// -// The Collector example demonstrates a use of DescribeByCollect. 
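The Describe/Collect contract spelled out above is easiest to see in a minimal custom Collector. The sketch below is written against the client_golang v0.8.0 API that this change pins; the collector type, the metric name, and the currentQueueDepth helper are illustrative assumptions, not part of this repository.

~~~ Go
package example

import "github.com/prometheus/client_golang/prometheus"

// queueDepthCollector reads an externally managed value at scrape time and
// exports it as a gauge, keeping no Metric state of its own.
type queueDepthCollector struct {
	desc *prometheus.Desc
}

func newQueueDepthCollector() *queueDepthCollector {
	return &queueDepthCollector{
		desc: prometheus.NewDesc(
			"queue_depth",
			"Current number of items waiting in the work queue.",
			nil, nil,
		),
	}
}

// Describe sends the descriptor of every metric that Collect can emit.
func (c *queueDepthCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect builds a throw-away const metric from the external value on each scrape.
func (c *queueDepthCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, currentQueueDepth())
}

// currentQueueDepth stands in for whatever external source is being exported.
func currentQueueDepth() float64 { return 42 }
~~~

Registering it is then the usual prometheus.MustRegister(newQueueDepthCollector()).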
-func DescribeByCollect(c Collector, descs chan<- *Desc) { - metrics := make(chan Metric) - go func() { - c.Collect(metrics) - close(metrics) - }() - for m := range metrics { - descs <- m.Desc() - } -} - // selfCollector implements Collector for a single Metric so that the Metric // collects itself. Add it as an anonymous field to a struct that implements // Metric, and call init with the Metric itself as an argument. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index d463e36d3..ee37949ad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -15,10 +15,6 @@ package prometheus import ( "errors" - "math" - "sync/atomic" - - dto "github.com/prometheus/client_model/go" ) // Counter is a Metric that represents a single numerical value that only ever @@ -34,8 +30,16 @@ type Counter interface { Metric Collector - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. + // Set is used to set the Counter to an arbitrary value. It is only used + // if you have to transfer a value from an external counter into this + // Prometheus metric. Do not use it for regular handling of a + // Prometheus counter (as it can be used to break the contract of + // monotonically increasing values). + // + // Deprecated: Use NewConstMetric to create a counter for an external + // value. A Counter should never be set. + Set(float64) + // Inc increments the counter by 1. Inc() // Add adds the given value to the counter. It panics if the value is < // 0. @@ -46,14 +50,6 @@ type Counter interface { type CounterOpts Opts // NewCounter creates a new Counter based on the provided CounterOpts. -// -// The returned implementation tracks the counter value in two separate -// variables, a float64 and a uint64. The latter is used to track calls of the -// Inc method and calls of the Add method with a value that can be represented -// as a uint64. This allows atomic increments of the counter with optimal -// performance. (It is common to have an Inc call in very hot execution paths.) -// Both internal tracking values are added up in the Write method. This has to -// be taken into account when it comes to precision and overflow behavior. func NewCounter(opts CounterOpts) Counter { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -61,58 +57,20 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} result.init(result) // Init self-collection. return result } type counter struct { - // valBits contains the bits of the represented float64 value, while - // valInt stores values that are exact integers. Both have to go first - // in the struct to guarantee alignment for atomic operations. 
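The deprecation note on Counter's Set method above points away from storing an externally maintained total in a Counter. One way to do that under the v0.8.0 API pinned here is NewCounterFunc, which evaluates a callback at collect time; the metric name and the bytesSentTotal accessor below are hypothetical.

~~~ Go
package example

import "github.com/prometheus/client_golang/prometheus"

// bytesSentTotal stands in for a monotonically increasing total that is
// maintained outside of this process's Prometheus instrumentation.
func bytesSentTotal() float64 { return 1.2e9 }

func registerBytesSent() {
	// The callback runs on every scrape, so the deprecated Set method is
	// never needed to mirror the external value.
	prometheus.MustRegister(prometheus.NewCounterFunc(
		prometheus.CounterOpts{
			Name: "bytes_sent_total",
			Help: "Total number of bytes sent, as reported by the network layer.",
		},
		bytesSentTotal,
	))
}
~~~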
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - valInt uint64 - - selfCollector - desc *Desc - - labelPairs []*dto.LabelPair -} - -func (c *counter) Desc() *Desc { - return c.desc + value } func (c *counter) Add(v float64) { if v < 0 { panic(errors.New("counter cannot decrease in value")) } - ival := uint64(v) - if float64(ival) == v { - atomic.AddUint64(&c.valInt, ival) - return - } - - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } -} - -func (c *counter) Inc() { - atomic.AddUint64(&c.valInt, 1) -} - -func (c *counter) Write(out *dto.Metric) error { - fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) - ival := atomic.LoadUint64(&c.valInt) - val := fval + float64(ival) - - return populateMetric(CounterValue, val, c.labelPairs, out) + c.value.Add(v) } // CounterVec is a Collector that bundles a set of Counters that all share the @@ -120,12 +78,16 @@ func (c *counter) Write(out *dto.Metric) error { // if you want to count the same thing partitioned by various dimensions // (e.g. number of HTTP requests, partitioned by response code and // method). Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. type CounterVec struct { - *metricVec + *MetricVec } // NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. +// partitioned by the given label names. At least one label name must be +// provided. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -134,62 +96,34 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { opts.ConstLabels, ) return &CounterVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} result.init(result) // Init self-collection. return result }), } } -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. 
Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. +func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Counter), err } return nil, err } -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.metricVec.getMetricWith(labels) +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Counter), err } @@ -197,57 +131,18 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) -func (v *CounterVec) WithLabelValues(lvs ...string) Counter { - c, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return c +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { + return m.MetricVec.WithLabelValues(lvs...).(Counter) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *CounterVec) With(labels Labels) Counter { - c, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return c -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). 
It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the CounterVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &CounterVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { + return m.MetricVec.With(labels).(Counter) } // CounterFunc is a Counter whose value is determined at collect time by calling a diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 1d034f871..77f4b30e8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -16,15 +16,33 @@ package prometheus import ( "errors" "fmt" + "regexp" "sort" "strings" "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) +var ( + metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) + labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + // Desc is the descriptor used by every Prometheus Metric. It is essentially // the immutable meta-data of a Metric. The normal Metric implementations // included in this package manage their Desc under the hood. Users only have to @@ -60,27 +78,32 @@ type Desc struct { // Help string. Each Desc with the same fqName must have the same // dimHash. dimHash uint64 - // err is an error that occurred during construction. It is reported on + // err is an error that occured during construction. It is reported on // registration time. err error } // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc // and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. +// be nil if no such labels should be set. fqName and help must not be empty. // // variableLabels only contain the label names. Their label values are variable // and therefore not part of the Desc. (They are managed within the Metric.) // // For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. 
See the Collector example for a usage pattern. +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, variableLabels: variableLabels, } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !metricNameRE.MatchString(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } @@ -93,7 +116,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // First add only the const label names and sort them... for labelName := range constLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + d.err = fmt.Errorf("%q is not a valid label name", labelName) return d } labelNames = append(labelNames, labelName) @@ -104,18 +127,12 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * for _, labelName := range labelNames { labelValues = append(labelValues, constLabels[labelName]) } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. - if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. for _, labelName := range variableLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + d.err = fmt.Errorf("%q is not a valid label name", labelName) return d } labelNames = append(labelNames, "$"+labelName) @@ -125,7 +142,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("duplicate label names") return d } - vh := hashNew() for _, val := range labelValues { vh = hashAdd(vh, val) @@ -152,7 +168,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(labelPairSorter(d.constLabelPairs)) + sort.Sort(LabelPairSorter(d.constLabelPairs)) return d } @@ -182,3 +198,8 @@ func (d *Desc) String() string { d.variableLabels, ) } + +func checkLabelName(l string) bool { + return labelNameRE.MatchString(l) && + !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 5d9525def..b15a2d3b9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -11,15 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheus is the core instrumentation package. It provides metrics -// primitives to instrument code for monitoring. It also offers a registry for -// metrics. Sub-packages allow to expose the registered metrics via HTTP -// (package promhttp) or push them to a Pushgateway (package push). There is -// also a sub-package promauto, which provides metrics constructors with -// automatic registration. +// Package prometheus provides metrics primitives to instrument code for +// monitoring. 
It also offers a registry for metrics. Sub-packages allow to +// expose the registered metrics via HTTP (package promhttp) or push them to a +// Pushgateway (package push). // // All exported functions and methods are safe to be used concurrently unless -// specified otherwise. +//specified otherwise. // // A Basic Example // @@ -28,7 +26,6 @@ // package main // // import ( -// "log" // "net/http" // // "github.com/prometheus/client_golang/prometheus" @@ -62,7 +59,7 @@ // // The Handler function provides a default handler to expose metrics // // via an HTTP server. "/metrics" is the usual endpoint for that. // http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) +// http.ListenAndServe(":8080", nil) // } // // @@ -72,12 +69,9 @@ // Metrics // // The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example +// overwhelming. Hovever, in addition to the basic plumbing shown in the example // above, you only need to understand the different metric types and their -// vector versions for basic usage. Furthermore, if you are not concerned with -// fine-grained control of when and how to register metrics with the registry, -// have a look at the promauto package, which will effectively allow you to -// ignore registration altogether in simple cases. +// vector versions for basic usage. // // Above, you have already touched the Counter and the Gauge. There are two more // advanced metric types: the Summary and Histogram. A more thorough description @@ -101,8 +95,8 @@ // SummaryVec, HistogramVec, and UntypedVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or -// UntypedOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, +// HistogramOpts, or UntypedOpts. // // Custom Collectors and constant Metrics // @@ -120,18 +114,8 @@ // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and // NewConstSummary (and their respective Must… versions). That will happen in // the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created later. -// NewDesc comes in handy to create those Desc instances. Alternatively, you -// could return no Desc at all, which will marke the Collector “unchecked”. No -// checks are porformed at registration time, but metric consistency will still -// be ensured at scrape time, i.e. any inconsistencies will lead to scrape -// errors. Thus, with unchecked Collectors, the responsibility to not collect -// metrics that lead to inconsistencies in the total scrape result lies with the -// implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situatios where the exact -// metrics to be returned by a Collector cannot be predicted at registration -// time, but the implementer has sufficient knowledge of the whole system to -// guarantee metric consistency. +// instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. // // The Collector example illustrates the use case. 
You can also look at the // source code of the processCollector (mirroring process metrics), the @@ -145,34 +129,34 @@ // Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. +// sometimes you might want to handle the errors the registration might +// cause. As suggested by the name, MustRegister panics if an error occurs. With +// the Register function, the error is returned and can be handled. // // An error is returned if the registered Collector is incompatible or // inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. +// consistency of the collected metrics according to the Prometheus data +// model. Inconsistencies are ideally detected at registration time, not at +// collect time. The former will usually be detected at start-up time of a +// program, while the latter will only happen at scrape time, possibly not even +// on the first scrape if the inconsistency only becomes relevant later. That is +// the main reason why a Collector and a Metric have to describe themselves to +// the registry. // // So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can be found in the global DefaultRegistry variable. With NewRegistry, you // can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use +// Gatherer interfaces yourself. The methods Register and Unregister work in +// the same way on a custom registry as the global functions Register and +// Unregister on the default registry. +// +// There are a number of uses for custom registries: You can use registries +// with special properties, see NewPedanticRegistry. You can avoid global state, +// as it is imposed by the DefaultRegistry. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use // separate registries for testing purposes. // -// Also note that the DefaultRegisterer comes registered with a Collector for Go +// Also note that the DefaultRegistry comes registered with a Collector for Go // runtime metrics (via NewGoCollector) and a Collector for process metrics (via // NewProcessCollector). 
With a custom registry, you are in control and decide // yourself about the Collectors to register. @@ -182,20 +166,16 @@ // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// (The top-level functions in the prometheus package are deprecated.) +// above. The tools to expose metrics via HTTP are in the promhttp +// sub-package. (The top-level functions in the prometheus package are +// deprecated.) // // Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // -// Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// // Other Means of Exposition // -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. +// More ways of exposing metrics can easily be added. Sending metrics to +// Graphite would be an example that will soon be implemented. package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go index 3d383a735..e3b67df8a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package prometheus // Inline and byte-free variant of hash/fnv's fnv64a. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 71d406bd9..8b70e5141 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -13,14 +13,6 @@ package prometheus -import ( - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - // Gauge is a Metric that represents a single numerical value that can // arbitrarily go up and down. // @@ -35,95 +27,29 @@ type Gauge interface { // Set sets the Gauge to an arbitrary value. Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. + // Inc increments the Gauge by 1. Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. + // Dec decrements the Gauge by 1. Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) + // Add adds the given value to the Gauge. (The value can be + // negative, resulting in a decrease of the Gauge.) Add(float64) // Sub subtracts the given value from the Gauge. (The value can be // negative, resulting in an increase of the Gauge.) 
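The registry discussion in the doc.go hunk above mentions NewRegistry and the Register/MustRegister split; the following sketch shows how those pieces fit together with the promhttp handler under the v0.8.0 API pinned by this change. The port and the choice of collectors are illustrative only.

~~~ Go
package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func serveCustomRegistry() error {
	// NewRegistry starts empty, without the Go and process collectors that
	// come pre-registered on the default registry; add back what is wanted.
	reg := prometheus.NewRegistry()
	if err := reg.Register(prometheus.NewGoCollector()); err != nil {
		return err
	}

	// HandlerFor exposes exactly the metrics gathered by this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	return http.ListenAndServe(":8080", nil)
}
~~~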
Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() } // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts // NewGauge creates a new Gauge based on the provided GaugeOpts. -// -// The returned implementation is optimized for a fast Set method. If you have a -// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick -// the former. For example, the Inc method of the returned Gauge is slower than -// the Inc method of a Counter returned by NewCounter. This matches the typical -// scenarios for Gauges and Counters, where the former tends to be Set-heavy and -// the latter Inc-heavy. func NewGauge(opts GaugeOpts) Gauge { - desc := NewDesc( + return newValue(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, - ) - result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. - return result -} - -type gauge struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - labelPairs []*dto.LabelPair -} - -func (g *gauge) Desc() *Desc { - return g.desc -} - -func (g *gauge) Set(val float64) { - atomic.StoreUint64(&g.valBits, math.Float64bits(val)) -} - -func (g *gauge) SetToCurrentTime() { - g.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (g *gauge) Inc() { - g.Add(1) -} - -func (g *gauge) Dec() { - g.Add(-1) -} - -func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } -} - -func (g *gauge) Sub(val float64) { - g.Add(val * -1) -} - -func (g *gauge) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, out) + ), GaugeValue, 0) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -132,11 +58,12 @@ func (g *gauge) Write(out *dto.Metric) error { // (e.g. number of operations queued, partitioned by user and operation // type). Create instances with NewGaugeVec. type GaugeVec struct { - *metricVec + *MetricVec } // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. +// partitioned by the given label names. At least one label name must be +// provided. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -145,62 +72,28 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { opts.ConstLabels, ) return &GaugeVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) }), } } -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the VariableLabels in Desc). 
If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. +func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Gauge), err } return nil, err } -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.metricVec.getMetricWith(labels) +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. +func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Gauge), err } @@ -208,57 +101,18 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) -func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { - g, err := v.GetMetricWithLabelValues(lvs...) 
- if err != nil { - panic(err) - } - return g +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { + return m.MetricVec.WithLabelValues(lvs...).(Gauge) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *GaugeVec) With(labels Labels) Gauge { - g, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return g -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the GaugeVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &GaugeVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { + return m.MetricVec.With(labels).(Gauge) } // GaugeFunc is a Gauge whose value is determined at collect time by calling a diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ba3b9333e..abc9d4ec4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package prometheus import ( @@ -21,39 +8,26 @@ import ( ) type goCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - goInfoDesc *Desc + goroutines Gauge + gcDesc *Desc // metrics to describe and collect metrics memStatsMetrics } -// NewGoCollector returns a collector which exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. 
This causes a stop-the-world, which is very short with Go1.9+ -// (~25µs). However, with older Go versions, the stop-the-world duration depends -// on the heap size and can be quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). +// NewGoCollector returns a collector which exports metrics about the current +// go process. func NewGoCollector() Collector { return &goCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), + goroutines: NewGauge(GaugeOpts{ + Namespace: "go", + Name: "goroutines", + Help: "Number of goroutines that currently exist.", + }), gcDesc: NewDesc( "go_gc_duration_seconds", "A summary of the GC invocation durations.", nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), metrics: memStatsMetrics{ { desc: NewDesc( @@ -74,7 +48,7 @@ func NewGoCollector() Collector { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained by system. Sum of all system allocations.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, @@ -137,12 +111,12 @@ func NewGoCollector() Collector { valType: GaugeValue, }, { desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + memstatNamespace("heap_released_bytes_total"), + "Total number of heap bytes released to OS.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, + valType: CounterValue, }, { desc: NewDesc( memstatNamespace("heap_objects"), @@ -239,14 +213,6 @@ func NewGoCollector() Collector { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, }, }, } @@ -258,10 +224,9 @@ func memstatNamespace(s string) string { // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc + ch <- c.goroutines.Desc() ch <- c.gcDesc - ch <- c.goInfoDesc + for _, i := range c.metrics { ch <- i.desc } @@ -269,9 +234,8 @@ func (c *goCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. 
func (c *goCollector) Collect(ch chan<- Metric) { - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -282,9 +246,7 @@ func (c *goCollector) Collect(ch chan<- Metric) { quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() } quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) ms := &runtime.MemStats{} runtime.ReadMemStats(ms) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index f88da707b..9719e8fac 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -16,9 +16,7 @@ package prometheus import ( "fmt" "math" - "runtime" "sort" - "sync" "sync/atomic" "github.com/golang/protobuf/proto" @@ -110,9 +108,8 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { } // HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name to a non-empty string. All other fields are optional -// and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. type HistogramOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Histogram (created by joining these components with @@ -123,22 +120,29 @@ type HistogramOpts struct { Subsystem string Name string - // Help provides information about this Histogram. + // Help provides information about this Histogram. Mandatory! // // Metrics with the same fully-qualified name must have the same Help // string. Help string - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. + // ConstLabels are used to attach fixed labels to this + // Histogram. Histograms with the same fully-qualified name must have the + // same label names in their ConstLabels. // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // HistogramVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. 
Another, more advanced purpose is if more than one + // Collector needs to collect Histograms with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). ConstLabels Labels // Buckets defines the buckets into which observations are counted. Each @@ -165,7 +169,7 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(errInconsistentCardinality) } for _, n := range desc.variableLabels { @@ -187,7 +191,6 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr desc: desc, upperBounds: opts.Buckets, labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -204,53 +207,28 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } } - // Finally we know the final length of h.upperBounds and can make counts - // for both states: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) h.init(h) // Init self-collection. return h } -type histogramCounts struct { +type histogram struct { // sumBits contains the bits of the float64 representing the sum of all // observations. sumBits and count have to go first in the struct to // guarantee alignment for atomic operations. // http://golang.org/pkg/sync/atomic/#pkg-note-BUG sumBits uint64 count uint64 - buckets []uint64 -} - -type histogram struct { - // countAndHotIdx is a complicated one. For lock-free yet atomic - // observations, we need to save the total count of observations again, - // combined with the index of the currently-hot counts struct, so that - // we can perform the operation on both values atomically. The least - // significant bit defines the hot counts struct. The remaining 63 bits - // represent the total count of observations. This happens under the - // assumption that the 63bit count will never overflow. Rationale: An - // observations takes about 30ns. Let's assume it could happen in - // 10ns. Overflowing the counter will then take at least (2^63)*10ns, - // which is about 3000 years. - // - // This has to be first in the struct for 64bit alignment. See - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. + // Note that there is no mutex required. - upperBounds []float64 + desc *Desc - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*histogramCounts - hotIdx int // Index of currently-hot counts. Only used within Write. 
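The HistogramOpts fields described above come together as in this short sketch against the v0.8.0 API pinned by this change; the duration metric name and the bucket layout are illustrative assumptions.

~~~ Go
package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "http_request_duration_seconds",
	Help: "HTTP request latency distribution.",
	// Ten buckets doubling from 1ms up to ~0.5s; the +Inf bucket is implicit.
	Buckets: prometheus.ExponentialBuckets(0.001, 2, 10),
})

func init() {
	prometheus.MustRegister(requestDuration)
}

func observeRequest(start time.Time) {
	requestDuration.Observe(time.Since(start).Seconds())
}
~~~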
+ upperBounds []float64 + counts []uint64 labelPairs []*dto.LabelPair } @@ -270,113 +248,36 @@ func (h *histogram) Observe(v float64) { // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op // 300 buckets: 154 ns/op linear - binary 61.6 ns/op i := sort.SearchFloat64s(h.upperBounds, v) - - // We increment h.countAndHotIdx by 2 so that the counter in the upper - // 63 bits gets incremented by 1. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 2) - hotCounts := h.counts[n%2] - - if i < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[i], 1) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) } + atomic.AddUint64(&h.count, 1) for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) + oldBits := atomic.LoadUint64(&h.sumBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { break } } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) } func (h *histogram) Write(out *dto.Metric) error { - var ( - his = &dto.Histogram{} - buckets = make([]*dto.Bucket, len(h.upperBounds)) - hotCounts, coldCounts *histogramCounts - count uint64 - ) - - // For simplicity, we mutex the rest of this method. It is not in the - // hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() - - // This is a bit arcane, which is why the following spells out this if - // clause in English: - // - // If the currently-hot counts struct is #0, we atomically increment - // h.countAndHotIdx by 1 so that from now on Observe will use the counts - // struct #1. Furthermore, the atomic increment gives us the new value, - // which, in its most significant 63 bits, tells us the count of - // observations done so far up to and including currently ongoing - // observations still using the counts struct just changed from hot to - // cold. To have a normal uint64 for the count, we bitshift by 1 and - // save the result in count. We also set h.hotIdx to 1 for the next - // Write call, and we will refer to counts #1 as hotCounts and to counts - // #0 as coldCounts. - // - // If the currently-hot counts struct is #1, we do the corresponding - // things the other way round. We have to _decrement_ h.countAndHotIdx - // (which is a bit arcane in itself, as we have to express -1 with an - // unsigned int...). - if h.hotIdx == 0 { - count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 - h.hotIdx = 1 - hotCounts = h.counts[1] - coldCounts = h.counts[0] - } else { - count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. - h.hotIdx = 0 - hotCounts = h.counts[0] - coldCounts = h.counts[1] - } - - // Now we have to wait for the now-declared-cold counts to actually cool - // down, i.e. wait for all observations still using it to finish. That's - // the case once the count in the cold counts struct is the same as the - // one atomically retrieved from the upper 63bits of h.countAndHotIdx. - for { - if count == atomic.LoadUint64(&coldCounts.count) { - break - } - runtime.Gosched() // Let observations get work done. 
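Both the removed double-buffered implementation and the restored single-slice one update the running sum with the same trick: the float64 is kept as its bit pattern in a uint64 and added to in a CompareAndSwap retry loop, since there is no atomic add for float64. A self-contained sketch of that pattern, with illustrative names only:

~~~ Go
package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// addFloat64 atomically adds v to the float64 whose bit pattern is held in *bits.
func addFloat64(bits *uint64, v float64) {
	for {
		oldBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
			return
		}
	}
}

func main() {
	var sumBits uint64
	var wg sync.WaitGroup
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			addFloat64(&sumBits, 0.5)
		}()
	}
	wg.Wait()
	// Prints 500: no observations are lost despite concurrent writers.
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&sumBits)))
}
~~~

Without the retry loop, two concurrent observers could both read the same old bits and one addition would be silently dropped.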
- } + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) - his.SampleCount = proto.Uint64(count) - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) - var cumCount uint64 + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 for i, upperBound := range h.upperBounds { - cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + count += atomic.LoadUint64(&h.counts[i]) buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(cumCount), + CumulativeCount: proto.Uint64(count), UpperBound: proto.Float64(upperBound), } } - his.Bucket = buckets out.Histogram = his out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) - } return nil } @@ -386,11 +287,12 @@ func (h *histogram) Write(out *dto.Metric) error { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewHistogramVec. type HistogramVec struct { - *metricVec + *MetricVec } // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. +// partitioned by the given label names. At least one label name must be +// provided. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -399,116 +301,47 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { opts.ConstLabels, ) return &HistogramVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { return newHistogram(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. 
For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Histogram and not a +// Metric so that no type conversion is required. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Observer), err + return metric.(Histogram), err } return nil, err } -// GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Histogram is created. Implications of -// creating a Histogram without using it and keeping the Histogram for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Histogram and not a Metric so that no +// type conversion is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { - return metric.(Observer), err + return metric.(Histogram), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { - h, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return h -} - -// With works as GetMetricWith but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *HistogramVec) With(labels Labels) Observer { - h, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return h -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the HistogramVec are shared between the curried and -// uncurried vectors. 
They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &HistogramVec{vec}, err - } - return nil, err +func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { + return m.MetricVec.WithLabelValues(lvs...).(Histogram) } -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *HistogramVec) With(labels Labels) Histogram { + return m.MetricVec.With(labels).(Histogram) } type constHistogram struct { @@ -560,7 +393,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // bucket. // // NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. +// consistent with the variable labels in Desc. func NewConstHistogram( desc *Desc, count uint64, @@ -568,11 +401,8 @@ func NewConstHistogram( buckets map[float64]uint64, labelValues ...string, ) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality } return &constHistogram{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index 9f0875bfc..67ee5ac79 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -15,7 +15,9 @@ package prometheus import ( "bufio" + "bytes" "compress/gzip" + "fmt" "io" "net" "net/http" @@ -39,10 +41,19 @@ const ( acceptEncodingHeader = "Accept-Encoding" ) -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) } // Handler returns an HTTP handler for the DefaultGatherer. It is @@ -50,50 +61,68 @@ var gzipPool = sync.Pool{ // name). // // Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead. +// InstrumentHandler. You might want to consider using promhttp.Handler instead +// (which is non instrumented). func Handler() http.Handler { return InstrumentHandler("prometheus", UninstrumentedHandler()) } // UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. // -// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) -// instead. See there for further documentation. +// Deprecated: Use promhttp.Handler instead. See there for further documentation. 
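Whichever of the two vector APIs ends up vendored, instrumented code calls the same methods; a minimal usage sketch, with a metric name and labels that are illustrative rather than taken from this repository:

~~~ Go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds", // illustrative name
		Help:    "Request latency in seconds.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(reqDur)

	// Fast path: positional label values.
	reqDur.WithLabelValues("404", "GET").Observe(0.042)

	// More readable (but slower) path: a Labels map.
	reqDur.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(0.042)
}
~~~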
func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { mfs, err := DefaultGatherer.Gather() if err != nil { - httpError(rsp, err) + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) return } contentType := expfmt.Negotiate(req.Header) - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error for _, mf := range mfs { if err := enc.Encode(mf); err != nil { - httpError(rsp, err) + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) return } } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) }) } +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + var instLabels = []string{"method", "code"} type nower interface { @@ -110,6 +139,16 @@ var now nower = nowFunc(func() time.Time { return time.Now() }) +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + // InstrumentHandler wraps the given HTTP handler for instrumentation. It // registers four metric collectors (if not already done) and reports HTTP // metrics to the (newly or already) registered collectors: http_requests_total @@ -119,16 +158,23 @@ var now nower = nowFunc(func() time.Time { // value. http_requests_total is a metric vector partitioned by HTTP method // (label name "method") and HTTP status code (label name "code"). // -// Deprecated: InstrumentHandler has several issues. Use the tooling provided in -// package promhttp instead. The issues are the following: (1) It uses Summaries -// rather than Histograms. Summaries are not useful if aggregation across -// multiple instances is required. (2) It uses microseconds as unit, which is -// deprecated and should be replaced by seconds. (3) The size of the request is -// calculated in a separate goroutine. Since this calculator requires access to -// the request header, it creates a race with any writes to the header performed -// during request handling. 
httputil.ReverseProxy is a prominent example for a -// handler performing such writes. (4) It has additional issues with HTTP/2, cf. -// https://github.com/prometheus/client_golang/issues/272. +// Deprecated: InstrumentHandler has several issues: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. +// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// Upcoming versions of this package will provide ways of instrumenting HTTP +// handlers that are more flexible and have fewer issues. Please prefer direct +// instrumentation in the meantime. func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) } @@ -138,13 +184,12 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun // issues). // // Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. +// InstrumentHandler is. func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { return InstrumentHandlerFuncWithOpts( SummaryOpts{ Subsystem: "http", ConstLabels: Labels{"handler": handlerName}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, handlerFunc, ) @@ -177,7 +222,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri // SummaryOpts. // // Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. +// InstrumentHandler is. func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) } @@ -188,7 +233,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand // SummaryOpts are used. // // Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. Use the tooling provided in package promhttp instead. +// as InstrumentHandler is. func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { reqCnt := NewCounterVec( CounterOpts{ @@ -200,52 +245,34 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo }, instLabels, ) - if err := Register(reqCnt); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqCnt = are.ExistingCollector.(*CounterVec) - } else { - panic(err) - } - } opts.Name = "request_duration_microseconds" opts.Help = "The HTTP request latencies in microseconds." reqDur := NewSummary(opts) - if err := Register(reqDur); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqDur = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } opts.Name = "request_size_bytes" opts.Help = "The HTTP request sizes in bytes." 
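The registration handling stripped out above builds on AlreadyRegisteredError, which lets a caller pick up the collector that was registered first instead of panicking. Restated as a standalone helper (the package and function names are illustrative):

~~~ Go
package metricsutil // illustrative package name

import "github.com/prometheus/client_golang/prometheus"

// registerOrReuse registers c with the default registry, or returns the
// identical counter that was registered earlier, mirroring the idiom in the
// removed lines above.
func registerOrReuse(c prometheus.Counter) prometheus.Counter {
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			return are.ExistingCollector.(prometheus.Counter)
		}
		panic(err)
	}
	return c
}
~~~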
reqSz := NewSummary(opts) - if err := Register(reqSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } opts.Name = "response_size_bytes" opts.Help = "The HTTP response sizes in bytes." resSz := NewSummary(opts) - if err := Register(resSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - resSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() delegate := &responseWriterDelegator{ResponseWriter: w} - out := computeApproximateRequestSize(r) + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) _, cn := w.(http.CloseNotifier) _, fl := w.(http.Flusher) @@ -263,52 +290,39 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo method := sanitizeMethod(r.Method) code := sanitizeCode(delegate.status) - reqCnt.WithLabelValues(method, code).Inc() - reqDur.Observe(elapsed) - resSz.Observe(float64(delegate.written)) - reqSz.Observe(float64(<-out)) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) }) } -func computeApproximateRequestSize(r *http.Request) <-chan int { - // Get URL length in current goroutine for avoiding a race condition. - // HandlerFunc that runs in parallel may modify the URL. - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - out := make(chan int, 1) - - go func() { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + } + s += len(r.Host) - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s - close(out) - }() + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - return out + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s } type responseWriterDelegator struct { http.ResponseWriter - status int - written int64 - wroteHeader bool + handler, method string + status int + written int64 + wroteHeader bool } func (r *responseWriterDelegator) WriteHeader(code int) { @@ -474,31 +488,3 @@ func sanitizeCode(s int) string { return strconv.Itoa(s) } } - -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. 
However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go deleted file mode 100644 index 351c26e1a..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// NormalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. 
-func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index 2744443ac..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. 
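The label-name rule just described is small enough to restate as a standalone helper; this sketch assumes only the github.com/prometheus/common/model package that the deleted file itself relies on, and the helper name is illustrative:

~~~ Go
package metricsutil // illustrative package name

import (
	"strings"

	"github.com/prometheus/common/model"
)

// validUserLabelName restates the rule above: the name must be valid for the
// Prometheus data model and must not carry the reserved "__" prefix.
func validUserLabelName(name string) bool {
	return model.LabelName(name).IsValid() && !strings.HasPrefix(name, "__")
}
~~~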
-const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { - return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", - errInconsistentCardinality, fqName, - len(labels), labels, - len(labelValues), labelValues, - ) -} - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(labels), labels, - ) - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(vals), vals, - ) - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 55e6d86d5..d4063d98f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -15,9 +15,6 @@ package prometheus import ( "strings" - "time" - - "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" ) @@ -46,8 +43,9 @@ type Metric interface { // While populating dto.Metric, it is the responsibility of the // implementation to ensure validity of the Metric protobuf (like valid // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. Callers of Write should - // still make sure of sorting if they depend on it. + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. Write(*dto.Metric) error // TODO(beorn7): The original rationale of passing in a pre-allocated // dto.Metric protobuf to save allocations has disappeared. The @@ -59,9 +57,8 @@ type Metric interface { // implementation XXX has its own XXXOpts type, but in most cases, it is just be // an alias of this type (which might change when the requirement arises.) // -// It is mandatory to set Name to a non-empty string. All other fields are -// optional and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. type Opts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Metric (created by joining these components with @@ -72,7 +69,7 @@ type Opts struct { Subsystem string Name string - // Help provides information about this metric. + // Help provides information about this metric. Mandatory! // // Metrics with the same fully-qualified name must have the same Help // string. 
@@ -82,12 +79,20 @@ type Opts struct { // with the same fully-qualified name must have the same label names in // their ConstLabels. // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a metric + // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels + // serve only special purposes. One is for the special case where the + // value of a label does not change during the lifetime of a process, + // e.g. if the revision of the running binary is put into a + // label. Another, more advanced purpose is if more than one Collector + // needs to collect Metrics with the same fully-qualified name. In that + // case, those Metrics must differ in the values of their + // ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). ConstLabels Labels } @@ -113,22 +118,37 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type labelPairSorter []*dto.LabelPair +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. +type LabelPairSorter []*dto.LabelPair -func (s labelPairSorter) Len() int { +func (s LabelPairSorter) Len() int { return len(s) } -func (s labelPairSorter) Swap(i, j int) { +func (s LabelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s labelPairSorter) Less(i, j int) bool { +func (s LabelPairSorter) Less(i, j int) bool { return s[i].GetName() < s[j].GetName() } +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + type invalidMetric struct { desc *Desc err error @@ -144,31 +164,3 @@ func NewInvalidMetric(desc *Desc, err error) Metric { func (m *invalidMetric) Desc() *Desc { return m.desc } func (m *invalidMetric) Write(*dto.Metric) error { return m.err } - -type timestampedMetric struct { - Metric - t time.Time -} - -func (m timestampedMetric) Write(pb *dto.Metric) error { - e := m.Metric.Write(pb) - pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) - return e -} - -// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a -// way that it has an explicit timestamp set to the provided Time. This is only -// useful in rare cases as the timestamp of a Prometheus metric should usually -// be set by the Prometheus server during scraping. Exceptions include mirroring -// metrics with given timestamps from other metric -// sources. -// -// NewMetricWithTimestamp works best with MustNewConstMetric, -// MustNewConstHistogram, and MustNewConstSummary, see example. 
-// -// Currently, the exposition formats used by Prometheus are limited to -// millisecond resolution. Thus, the provided time will be rounded down to the -// next full millisecond value. -func NewMetricWithTimestamp(t time.Time, m Metric) Metric { - return timestampedMetric{Metric: m, t: t} -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index 5806cd09e..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. -type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. -type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - CurryWith(Labels) (ObserverVec, error) - MustCurryWith(Labels) ObserverVec - - Collector -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 55176d58c..e31e62e78 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -13,139 +13,89 @@ package prometheus -import ( - "errors" - "os" - - "github.com/prometheus/procfs" -) +import "github.com/prometheus/procfs" type processCollector struct { + pid int collectFn func(chan<- Metric) pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc -} - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. 
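The removed ObserverFunc documentation points at the "Gauge" Timer example; for orientation, a minimal version of that pattern under the 0.9.x API that is being dropped here (the gauge name and surrounding function are illustrative):

~~~ Go
package metricsutil // illustrative package name

import "github.com/prometheus/client_golang/prometheus"

// backupDuration is an illustrative gauge; the Timer below writes the elapsed
// time into it via ObserverFunc when runBackup returns.
var backupDuration = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "last_backup_duration_seconds",
	Help: "Duration of the last backup run in seconds.",
})

func runBackup() {
	// ObserverFunc adapts backupDuration.Set to the Observer interface.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(backupDuration.Set))
	defer timer.ObserveDuration()
	// ... the actual work would go here ...
}
~~~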
By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool + cpuTotal Counter + openFDs, maxFDs Gauge + vsize, rss Gauge + startTime Gauge } // NewProcessCollector returns a collector which exports the current state of -// process metrics including CPU, memory and file descriptor usage as well as -// the process start time. The detailed behavior is defined by the provided -// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a -// collector for the current process with an empty namespace string and no error -// reporting. -// -// Currently, the collector depends on a Linux-style proc filesystem and -// therefore only exports metrics for Linux. -// -// Note: An older version of this function had the following signature: -// -// NewProcessCollector(pid int, namespace string) Collector -// -// Most commonly, it was called as -// -// NewProcessCollector(os.Getpid(), "") -// -// The following call of the current version is equivalent to the above: -// -// NewProcessCollector(ProcessCollectorOpts{}) -func NewProcessCollector(opts ProcessCollectorOpts) Collector { - ns := "" - if len(opts.Namespace) > 0 { - ns = opts.Namespace + "_" - } - - c := &processCollector{ - reportErrors: opts.ReportErrors, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - maxVsize: NewDesc( - ns+"process_virtual_memory_max_bytes", - "Maximum amount of virtual memory available in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - } +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} - if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } - } else { - c.pidFn = opts.PidFn +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. 
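With the 0.8.0 signature restored here, wiring the collector up can look like the following sketch; the registry and namespace are illustrative, and note that the default registry already carries an unprefixed process collector of its own:

~~~ Go
package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	// 0.8.0 signature: explicit PID plus a namespace prefix for the metrics.
	reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "myapp"))
	_ = reg // the registry would normally be handed to promhttp.HandlerFor
}
~~~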
The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. +func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewCounter(CounterOpts{ + Namespace: namespace, + Name: "process_cpu_seconds_total", + Help: "Total user and system CPU time spent in seconds.", + }), + openFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_open_fds", + Help: "Number of open file descriptors.", + }), + maxFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_max_fds", + Help: "Maximum number of open file descriptors.", + }), + vsize: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_virtual_memory_bytes", + Help: "Virtual memory size in bytes.", + }), + rss: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_resident_memory_bytes", + Help: "Resident memory size in bytes.", + }), + startTime: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + }), } // Set up process metric collection if supported by the runtime. if _, err := procfs.NewStat(); err == nil { c.collectFn = c.processCollect - } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } } - return c + return &c } // Describe returns all descriptions of the collector. func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime + ch <- c.cpuTotal.Desc() + ch <- c.openFDs.Desc() + ch <- c.maxFDs.Desc() + ch <- c.vsize.Desc() + ch <- c.rss.Desc() + ch <- c.startTime.Desc() } // Collect returns the current state of all metrics of the collector. @@ -153,52 +103,40 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. 
func (c *processCollector) processCollect(ch chan<- Metric) { pid, err := c.pidFn() if err != nil { - c.reportError(ch, nil, err) return } p, err := procfs.NewProc(pid) if err != nil { - c.reportError(ch, nil, err) return } if stat, err := p.NewStat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + c.cpuTotal.Set(stat.CPUTime()) + ch <- c.cpuTotal + c.vsize.Set(float64(stat.VirtualMemory())) + ch <- c.vsize + c.rss.Set(float64(stat.ResidentMemory())) + ch <- c.rss + if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) + c.startTime.Set(startTime) + ch <- c.startTime } - } else { - c.reportError(ch, nil, err) } if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) + c.openFDs.Set(float64(fds)) + ch <- c.openFDs } if limits, err := p.NewLimits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} - -func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { - if !c.reportErrors { - return - } - if desc == nil { - desc = NewInvalidDesc(err) + c.maxFDs.Set(float64(limits.OpenFiles)) + ch <- c.maxFDs } - ch <- NewInvalidMetric(desc, err) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go deleted file mode 100644 index 67b56d37c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package promhttp - -import ( - "bufio" - "io" - "net" - "net/http" -) - -const ( - closeNotifier = 1 << iota - flusher - hijacker - readerFrom - pusher -) - -type delegator interface { - http.ResponseWriter - - Status() int - Written() int64 -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool - observeWriteHeader func(int) -} - -func (r *responseWriterDelegator) Status() int { - return r.status -} - -func (r *responseWriterDelegator) Written() int64 { - return r.written -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) - if r.observeWriteHeader != nil { - r.observeWriteHeader(code) - } -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type closeNotifierDelegator struct{ *responseWriterDelegator } -type flusherDelegator struct{ *responseWriterDelegator } -type hijackerDelegator struct{ *responseWriterDelegator } -type readerFromDelegator struct{ *responseWriterDelegator } - -func (d closeNotifierDelegator) CloseNotify() <-chan bool { - return d.ResponseWriter.(http.CloseNotifier).CloseNotify() -} -func (d flusherDelegator) Flush() { - d.ResponseWriter.(http.Flusher).Flush() -} -func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return d.ResponseWriter.(http.Hijacker).Hijack() -} -func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) - d.written += n - return n, err -} - -var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) - -func init() { - // TODO(beorn7): Code generation would help here. 
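The combinatorial table being filled in here exists because a wrapper that merely embeds http.ResponseWriter hides the optional interfaces of the wrapped writer. A small sketch of that problem (types and names are illustrative):

~~~ Go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// plainWrapper embeds only the interface type, so its method set exposes
// nothing beyond http.ResponseWriter, whatever the wrapped value supports.
type plainWrapper struct{ http.ResponseWriter }

func flushable(w http.ResponseWriter) bool {
	_, ok := w.(http.Flusher)
	return ok
}

func main() {
	rec := httptest.NewRecorder() // *httptest.ResponseRecorder implements http.Flusher

	fmt.Println(flushable(rec))               // true
	fmt.Println(flushable(plainWrapper{rec})) // false: the wrapper hides Flush,
	// which is why a delegator is picked per interface combination above.
}
~~~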
- pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 - return d - } - pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 - return closeNotifierDelegator{d} - } - pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 - return flusherDelegator{d} - } - pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 - return struct { - *responseWriterDelegator - http.Flusher - http.CloseNotifier - }{d, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 - return hijackerDelegator{d} - } - pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 - return struct { - *responseWriterDelegator - http.Hijacker - http.CloseNotifier - }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - }{d, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 - return readerFromDelegator{d} - } - pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.CloseNotifier - }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - }{d, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - }{d, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go deleted file mode 
100644 index 31a706956..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -type pusherDelegator struct{ *responseWriterDelegator } - -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} - -func init() { - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 - return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - 
*responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go deleted file mode 100644 index 8bb9b8b68..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
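The init functions above (together with their pre-Go-1.8 counterpart) populate a lookup table indexed 0-31 by which optional interfaces the wrapped http.ResponseWriter supports, so that newDelegator can hand back a wrapper that still satisfies those interfaces. A minimal standalone sketch of that capability-bitmask idea, assuming hypothetical exported flag constants in place of the package's unexported closeNotifier, flusher, hijacker, readerFrom and pusher values:

~~~ Go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// Hypothetical stand-ins for the unexported flag values used by the vendored
// code; each optional interface contributes one bit to the table index.
const (
	hasCloseNotifier = 1 << iota // 1
	hasFlusher                   // 2
	hasHijacker                  // 4
	hasReaderFrom                // 8
	hasPusher                    // 16
)

// capabilities probes w with type assertions, much as newDelegator builds its
// id, and folds the results into a single index.
func capabilities(w http.ResponseWriter) int {
	id := 0
	if _, ok := w.(http.CloseNotifier); ok {
		id += hasCloseNotifier
	}
	if _, ok := w.(http.Flusher); ok {
		id += hasFlusher
	}
	if _, ok := w.(http.Hijacker); ok {
		id += hasHijacker
	}
	if _, ok := w.(io.ReaderFrom); ok {
		id += hasReaderFrom
	}
	if _, ok := w.(http.Pusher); ok {
		id += hasPusher
	}
	return id
}

func main() {
	// httptest.ResponseRecorder implements only Flusher among the five,
	// so this prints "capabilities: 00010" (index 2 in the table).
	fmt.Printf("capabilities: %05b\n", capabilities(httptest.NewRecorder()))
}
~~~

Pre-building one wrapper per index keeps the returned delegator's method set identical to the original writer's, so type assertions done later by handlers keep working.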
- -// +build !go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 668eb6b3c..b6dd5a266 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -11,34 +11,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package promhttp provides tooling around HTTP servers and clients. +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. // -// First, the package allows the creation of http.Handler instances to expose -// Prometheus metrics via HTTP. promhttp.Handler acts on the -// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a -// custom registry or anything that implements the Gatherer interface. It also -// allows the creation of handlers that act differently on errors or allow to -// log errors. -// -// Second, the package provides tooling to instrument instances of http.Handler -// via middleware. Middleware wrappers follow the naming scheme -// InstrumentHandlerX, where X describes the intended use of the middleware. -// See each function's doc comment for specific details. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +// Package promhttp contains functions to create http.Handler instances to +// expose Prometheus metrics via HTTP. In later versions of this package, it +// will also contain tooling to instrument instances of http.Handler and +// http.RoundTripper. // -// Finally, the package allows for an http.RoundTripper to be instrumented via -// middleware. Middleware wrappers follow the naming scheme -// InstrumentRoundTripperX, where X describes the intended use of the -// middleware. See each function's doc comment for specific details. +// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor, +// you can create a handler for a custom registry or anything that implements +// the Gatherer interface. It also allows to create handlers that act +// differently on errors or allow to log errors. package promhttp import ( + "bytes" "compress/gzip" "fmt" "io" "net/http" "strings" "sync" - "time" "github.com/prometheus/common/expfmt" @@ -52,56 +49,36 @@ const ( acceptEncodingHeader = "Accept-Encoding" ) -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) } -// Handler returns an http.Handler for the prometheus.DefaultGatherer, using -// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has -// no error logging, and it applies compression if requested by the client. 
-// -// The returned http.Handler is already instrumented using the -// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you -// create multiple http.Handlers by separate calls of the Handler function, the -// metrics used for instrumentation will be shared between them, providing -// global scrape counts. +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The +// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP +// error, no error logging, and compression if requested by the client. // -// This function is meant to cover the bulk of basic use cases. If you are doing -// anything that requires more customization (including using a non-default -// Gatherer, different instrumentation, and non-default HandlerOpts), use the -// HandlerFor function. See there for details. +// If you want to create a Handler for the DefaultGatherer with different +// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and +// your desired HandlerOpts. func Handler() http.Handler { - return InstrumentMetricHandler( - prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), - ) + return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) } -// HandlerFor returns an uninstrumented http.Handler for the provided -// Gatherer. The behavior of the Handler is defined by the provided -// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom -// Gatherers, with non-default HandlerOpts, and/or with custom (or no) -// instrumentation. Use the InstrumentMetricHandler function to apply the same -// kind of instrumentation as it is used by the Handler function. +// HandlerFor returns an http.Handler for the provided Gatherer. The behavior +// of the Handler is defined by the provided HandlerOpts. func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - var inFlightSem chan struct{} - if opts.MaxRequestsInFlight > 0 { - inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) - } - - h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - if inFlightSem != nil { - select { - case inFlightSem <- struct{}{}: // All good, carry on. - defer func() { <-inFlightSem }() - default: - http.Error(rsp, fmt.Sprintf( - "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, - ), http.StatusServiceUnavailable) - return - } - } + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { mfs, err := reg.Gather() if err != nil { if opts.ErrorLog != nil { @@ -112,40 +89,26 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { panic(err) case ContinueOnError: if len(mfs) == 0 { - // Still report the error if no metrics have been gathered. 
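Both before and after this change, HandlerFor is the way to expose a custom Gatherer; the v0.8.0 version being vendored back in simply serves the gathered families without the scrape-counter instrumentation. A small usage sketch against the HandlerOpts fields visible in this hunk (the gauge name and port are illustrative):

~~~ Go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry instead of the global DefaultGatherer.
	reg := prometheus.NewRegistry()

	up := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_up", // illustrative metric
		Help: "1 while the application is running.",
	})
	reg.MustRegister(up)
	up.Set(1)

	// ContinueOnError keeps serving whatever could be gathered even if one
	// Collector fails; HTTPErrorOnError (the default) aborts with a 500.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorHandling: promhttp.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}
~~~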
- httpError(rsp, err) + http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) return } case HTTPErrorOnError: - httpError(rsp, err) + http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) return } } contentType := expfmt.Negotiate(req.Header) - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf, opts.DisableCompression) + enc := expfmt.NewEncoder(writer, contentType) var lastErr error for _, mf := range mfs { if err := enc.Encode(mf); err != nil { lastErr = err if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding and sending metric family:", err) + opts.ErrorLog.Println("error encoding metric family:", err) } switch opts.ErrorHandling { case PanicOnError: @@ -153,75 +116,27 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { case ContinueOnError: // Handled later. case HTTPErrorOnError: - httpError(rsp, err) + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) return } } } - - if lastErr != nil { - httpError(rsp, lastErr) + if closer, ok := writer.(io.Closer); ok { + closer.Close() } - }) - - if opts.Timeout <= 0 { - return h - } - return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( - "Exceeded configured timeout of %v.\n", - opts.Timeout, - )) -} - -// InstrumentMetricHandler is usually used with an http.Handler returned by the -// HandlerFor function. It instruments the provided http.Handler with two -// metrics: A counter vector "promhttp_metric_handler_requests_total" to count -// scrapes partitioned by HTTP status code, and a gauge -// "promhttp_metric_handler_requests_in_flight" to track the number of -// simultaneous scrapes. This function idempotently registers collectors for -// both metrics with the provided Registerer. It panics if the registration -// fails. The provided metrics are useful to see how many scrapes hit the -// monitored target (which could be from different Prometheus servers or other -// scrapers), and how often they overlap (which would result in more than one -// scrape in flight at the same time). Note that the scrapes-in-flight gauge -// will contain the scrape by which it is exposed, while the scrape counter will -// only get incremented after the scrape is complete (as only then the status -// code is known). For tracking scrape durations, use the -// "scrape_duration_seconds" gauge created by the Prometheus server upon each -// scrape. -func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { - cnt := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_requests_total", - Help: "Total number of scrapes by HTTP status code.", - }, - []string{"code"}, - ) - // Initialize the most likely HTTP status codes. 
- cnt.WithLabelValues("200") - cnt.WithLabelValues("500") - cnt.WithLabelValues("503") - if err := reg.Register(cnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - cnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return } - } - - gge := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "promhttp_metric_handler_requests_in_flight", - Help: "Current number of scrapes being served.", - }) - if err := reg.Register(gge); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - gge = are.ExistingCollector.(prometheus.Gauge) - } else { - panic(err) + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) } - } - - return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) + w.Write(buf.Bytes()) + // TODO(beorn7): Consider streaming serving of metrics. + }) } // HandlerErrorHandling defines how a Handler serving metrics will handle @@ -265,47 +180,22 @@ type HandlerOpts struct { // If DisableCompression is true, the handler will never compress the // response, even if requested by the client. DisableCompression bool - // The number of concurrent HTTP requests is limited to - // MaxRequestsInFlight. Additional requests are responded to with 503 - // Service Unavailable and a suitable message in the body. If - // MaxRequestsInFlight is 0 or negative, no limit is applied. - MaxRequestsInFlight int - // If handling a request takes longer than Timeout, it is responded to - // with 503 ServiceUnavailable and a suitable Message. No timeout is - // applied if Timeout is 0 or negative. Note that with the current - // implementation, reaching the timeout simply ends the HTTP requests as - // described above (and even that only if sending of the body hasn't - // started yet), while the bulk work of gathering all the metrics keeps - // running in the background (with the eventual result to be thrown - // away). Until the implementation is improved, it is recommended to - // implement a separate timeout in potentially slow Collectors. - Timeout time.Duration } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { + if compressionDisabled { + return writer, "" + } + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") for _, part := range parts { - part = strings.TrimSpace(part) + part := strings.TrimSpace(part) if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true + return gzip.NewWriter(writer), "gzip" } } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. 
However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) + return writer, "" } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go deleted file mode 100644 index 86fd56447..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "net/http" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// The RoundTripperFunc type is an adapter to allow the use of ordinary -// functions as RoundTrippers. If f is a function with the appropriate -// signature, RountTripperFunc(f) is a RoundTripper that calls f. -type RoundTripperFunc func(req *http.Request) (*http.Response, error) - -// RoundTrip implements the RoundTripper interface. -func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return rt(r) -} - -// InstrumentRoundTripperInFlight is a middleware that wraps the provided -// http.RoundTripper. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.RoundTripper. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - gauge.Inc() - defer gauge.Dec() - return next.RoundTrip(r) - }) -} - -// InstrumentRoundTripperCounter is a middleware that wraps the provided -// http.RoundTripper to observe the request result with the provided CounterVec. -// The CounterVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. Partitioning of the CounterVec happens by HTTP status code -// and/or HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped RoundTripper panics or returns a non-nil error, the Counter -// is not incremented. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
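The file deleted above removes the client-side middleware (RoundTripperFunc, InstrumentRoundTripperInFlight, InstrumentRoundTripperCounter, InstrumentRoundTripperDuration), so callers pinned to v0.8.0 have to instrument outgoing requests themselves. A rough hand-rolled equivalent of the counter middleware, with an illustrative metric name and the same "code"/"method" labels:

~~~ Go
package main

import (
	"net/http"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

// countingTransport counts completed outgoing requests, similar in spirit to
// the removed InstrumentRoundTripperCounter middleware.
type countingTransport struct {
	next    http.RoundTripper
	counter *prometheus.CounterVec
}

func (t countingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	resp, err := t.next.RoundTrip(r)
	if err == nil {
		t.counter.WithLabelValues(strconv.Itoa(resp.StatusCode), r.Method).Inc()
	}
	return resp, err
}

func newCountedClient(reg prometheus.Registerer) *http.Client {
	c := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "myapp_client_requests_total", // illustrative name
		Help: "Outgoing HTTP requests by status code and method.",
	}, []string{"code", "method"})
	reg.MustRegister(c)
	return &http.Client{Transport: countingTransport{next: http.DefaultTransport, counter: c}}
}

func main() {
	client := newCountedClient(prometheus.DefaultRegisterer)
	resp, err := client.Get("https://example.com/")
	if err == nil {
		resp.Body.Close() // body and status ignored; only the counter matters here
	}
}
~~~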
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(counter) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - resp, err := next.RoundTrip(r) - if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() - } - return resp, err - }) -} - -// InstrumentRoundTripperDuration is a middleware that wraps the provided -// http.RoundTripper to observe the request duration with the provided -// ObserverVec. The ObserverVec must have zero, one, or two non-const -// non-curried labels. For those, the only allowed label names are "code" and -// "method". The function panics otherwise. The Observe method of the Observer -// in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped RoundTripper panics or returns a non-nil error, no values are -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(obs) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - resp, err := next.RoundTrip(r) - if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) - } - return resp, err - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go deleted file mode 100644 index a034d1ec0..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "context" - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" -) - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. 
-type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) - - return next.RoundTrip(r) - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go deleted file mode 100644 index 9db243805..000000000 --- 
a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "errors" - "net/http" - "strconv" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -// magicString is used for the hacky label test in checkLabels. Remove once fixed. -const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" - -// InstrumentHandlerInFlight is a middleware that wraps the provided -// http.Handler. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.Handler. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - g.Inc() - defer g.Dec() - next.ServeHTTP(w, r) - }) -} - -// InstrumentHandlerDuration is a middleware that wraps the provided -// http.Handler to observe the request duration with the provided ObserverVec. -// The ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request duration in seconds. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) - }) -} - -// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler -// to observe the request result with the provided CounterVec. The CounterVec -// must have zero, one, or two non-const non-curried labels. For those, the only -// allowed label names are "code" and "method". The function panics -// otherwise. 
Partitioning of the CounterVec happens by HTTP status code and/or -// HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, the Counter is not incremented. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(counter) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status())).Inc() - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0)).Inc() - }) -} - -// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided -// http.Handler to observe with the provided ObserverVec the request duration -// until the response headers are written. The ObserverVec must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. The Observe method of -// the Observer in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler panics before calling WriteHeader, no value is -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) - }) - next.ServeHTTP(d, r) - }) -} - -// InstrumentHandlerRequestSize is a middleware that wraps the provided -// http.Handler to observe the request size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. 
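Similarly, the instrument_server.go helpers being deleted in this hunk (InstrumentHandlerInFlight, InstrumentHandlerDuration, InstrumentHandlerCounter, and friends) are no longer available from the vendored tree after the downgrade. Where only a request-duration histogram is needed, a small hand-written middleware is enough; a sketch with an illustrative metric name and without "code"/"method" partitioning (capturing the status code would need a ResponseWriter wrapper like the deleted delegator):

~~~ Go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// durationMiddleware observes how long the wrapped handler takes, similar in
// spirit to the removed InstrumentHandlerDuration but without partitioning.
func durationMiddleware(hist prometheus.Histogram, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		hist.Observe(time.Since(start).Seconds())
	})
}

func main() {
	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "myapp_http_request_duration_seconds", // illustrative name
		Help:    "Time spent serving HTTP requests.",
		Buckets: prometheus.DefBuckets,
	})
	prometheus.MustRegister(hist)

	mux := http.NewServeMux()
	mux.Handle("/", durationMiddleware(hist, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})))
	log.Fatal(http.ListenAndServe(":8080", mux))
}
~~~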
-func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) - }) -} - -// InstrumentHandlerResponseSize is a middleware that wraps the provided -// http.Handler to observe the response size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the response size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { - code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) - }) -} - -func checkLabels(c prometheus.Collector) (code bool, method bool) { - // TODO(beorn7): Remove this hacky way to check for instance labels - // once Descriptors can have their dimensionality queried. - var ( - desc *prometheus.Desc - m prometheus.Metric - pm dto.Metric - lvs []string - ) - - // Get the Desc from the Collector. - descc := make(chan *prometheus.Desc, 1) - c.Describe(descc) - - select { - case desc = <-descc: - default: - panic("no description provided by collector") - } - select { - case <-descc: - panic("more than one description provided by collector") - default: - } - - close(descc) - - // Create a ConstMetric with the Desc. Since we don't know how many - // variable labels there are, try for as long as it needs. - for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { - m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) - } - - // Write out the metric into a proto message and look at the labels. - // If the value is not the magicString, it is a constLabel, which doesn't interest us. - // If the label is curried, it doesn't interest us. - // In all other cases, only "code" or "method" is allowed. 
- if err := m.Write(&pm); err != nil { - panic("error checking metric for labels") - } - for _, label := range pm.Label { - name, value := label.GetName(), label.GetValue() - if value != magicString || isLabelCurried(c, name) { - continue - } - switch name { - case "code": - code = true - case "method": - method = true - default: - panic("metric partitioned with non-supported labels") - } - } - return -} - -func isLabelCurried(c prometheus.Collector, label string) bool { - // This is even hackier than the label test above. - // We essentially try to curry again and see if it works. - // But for that, we need to type-convert to the two - // types we use here, ObserverVec or *CounterVec. - switch v := c.(type) { - case *prometheus.CounterVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - case prometheus.ObserverVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - default: - panic("unsupported metric vec type") - } - return true -} - -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. -var emptyLabels = prometheus.Labels{} - -func labels(code, method bool, reqMethod string, status int) prometheus.Labels { - if !(code || method) { - return emptyLabels - } - labels := prometheus.Labels{} - - if code { - labels["code"] = sanitizeCode(status) - } - if method { - labels["method"] = sanitizeMethod(reqMethod) - } - - return labels -} - -func computeApproximateRequestSize(r *http.Request) int { - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - return s -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -// If the wrapped http.Handler has not set a status code, i.e. the value is -// currently 0, santizeCode will return 200, for consistency with behavior in -// the stdlib. 
-func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200, 0: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index b5e70b93f..32a3986b0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,22 +15,15 @@ package prometheus import ( "bytes" + "errors" "fmt" - "io/ioutil" "os" - "path/filepath" - "runtime" "sort" - "strings" "sync" - "unicode/utf8" "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" ) const ( @@ -42,14 +35,13 @@ const ( // DefaultRegisterer and DefaultGatherer are the implementations of the // Registerer and Gatherer interface a number of convenience functions in this // package act on. Initially, both variables point to the same Registry, which -// has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector, in particular the note about -// stop-the-world implication with Go versions older than 1.9) already -// registered. This approach to keep default instances as global state mirrors -// the approach of other packages in the Go standard library. Note that there -// are caveats. Change the variables with caution and only if you understand the -// consequences. Users who want to avoid global state altogether should not use -// the convenience functions and act on custom instances instead. +// has a process collector (see NewProcessCollector) and a Go collector (see +// NewGoCollector) already registered. This approach to keep default instances +// as global state mirrors the approach of other packages in the Go standard +// library. Note that there are caveats. Change the variables with caution and +// only if you understand the consequences. Users who want to avoid global state +// altogether should not use the convenience function and act on custom +// instances instead. 
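The registry.go changes beginning in this hunk revert the documentation and the default collectors to their v0.8.0 form, where NewProcessCollector takes a pid and a namespace (see the init() change just below). For code that wants to avoid the global default registry entirely, the same collectors can be registered on a dedicated instance; a sketch against the v0.8.0 constructor shown in this diff:

~~~ Go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

// newInstrumentationRegistry builds a self-contained registry with the same
// process and Go collectors the package's init() registers globally.
func newInstrumentationRegistry() *prometheus.Registry {
	reg := prometheus.NewRegistry()
	// v0.8.0 signature: pid plus an optional metric namespace.
	reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))
	reg.MustRegister(prometheus.NewGoCollector())
	return reg
}

func main() {
	reg := newInstrumentationRegistry()
	mfs, err := reg.Gather()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("gathered %d metric families\n", len(mfs))
}
~~~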
var ( defaultRegistry = NewRegistry() DefaultRegisterer Registerer = defaultRegistry @@ -57,7 +49,7 @@ var ( ) func init() { - MustRegister(NewProcessCollector(ProcessCollectorOpts{})) + MustRegister(NewProcessCollector(os.Getpid(), "")) MustRegister(NewGoCollector()) } @@ -73,8 +65,7 @@ func NewRegistry() *Registry { // NewPedanticRegistry returns a registry that checks during collection if each // collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. Unchecked Collectors (those whose -// Describe methed does not yield any descriptors) are excluded from the check. +// actually been registered with the registry. // // Usually, a Registry will be happy as long as the union of all collected // Metrics is consistent and valid even if some metrics are not consistent with @@ -89,7 +80,7 @@ func NewPedanticRegistry() *Registry { // Registerer is the interface for the part of a registry in charge of // registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type +// Registerer as type for registration purposes (rather then the Registry type // directly). In that way, they are free to use custom Registerer implementation // (e.g. for testing purposes). type Registerer interface { @@ -104,13 +95,8 @@ type Registerer interface { // returned error is an instance of AlreadyRegisteredError, which // contains the previously registered Collector. // - // A Collector whose Describe method does not yield any Desc is treated - // as unchecked. Registration will always succeed. No check for - // re-registering (see previous paragraph) is performed. Thus, the - // caller is responsible for not double-registering the same unchecked - // Collector, and for providing a Collector that will not cause - // inconsistent metrics on collection. (This would lead to scrape - // errors.) + // It is in general not safe to register the same Collector multiple + // times concurrently. Register(Collector) error // MustRegister works like Register but registers any number of // Collectors and panics upon the first registration that causes an @@ -119,9 +105,7 @@ type Registerer interface { // Unregister unregisters the Collector that equals the Collector passed // in as an argument. (Two Collectors are considered equal if their // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. Note that an unchecked - // Collector cannot be unregistered (as its Describe method does not - // yield any descriptor). + // returns whether a Collector was unregistered. // // Note that even after unregistering, it will not be possible to // register a new Collector that is inconsistent with the unregistered @@ -139,23 +123,15 @@ type Registerer interface { type Gatherer interface { // Gather calls the Collect method of the registered Collectors and then // gathers the collected metrics into a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. 
Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. + // of MetricFamily protobufs. Even if an error occurs, Gather attempts + // to gather as many metrics as possible. Hence, if a non-nil error is + // returned, the returned MetricFamily slice could be nil (in case of a + // fatal error that prevented any meaningful metric collection) or + // contain a number of MetricFamily protobufs, some of which might be + // incomplete, and some might be missing altogether. The returned error + // (which might be a MultiError) explains the details. In scenarios + // where complete collection is critical, the returned MetricFamily + // protobufs should be disregarded if the returned error is non-nil. Gather() ([]*dto.MetricFamily, error) } @@ -176,6 +152,38 @@ func MustRegister(cs ...Collector) { DefaultRegisterer.MustRegister(cs...) } +// RegisterOrGet registers the provided Collector with the DefaultRegisterer and +// returns the Collector, unless an equal Collector was registered before, in +// which case that Collector is returned. +// +// Deprecated: RegisterOrGet is merely a convenience function for the +// implementation as described in the documentation for +// AlreadyRegisteredError. As the use case is relatively rare, this function +// will be removed in a future version of this package to clean up the +// namespace. +func RegisterOrGet(c Collector) (Collector, error) { + if err := Register(c); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + return are.ExistingCollector, nil + } + return nil, err + } + return c, nil +} + +// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning +// an error. +// +// Deprecated: This is deprecated for the same reason RegisterOrGet is. See +// there for details. +func MustRegisterOrGet(c Collector) Collector { + c, err := RegisterOrGet(c) + if err != nil { + panic(err) + } + return c +} + // Unregister removes the registration of the provided Collector from the // DefaultRegisterer. // @@ -193,6 +201,25 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { return gf() } +// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that +// gathers from the previous DefaultGatherers but then merges the MetricFamily +// protobufs returned from the provided hook function with the MetricFamily +// protobufs returned from the original DefaultGatherer. +// +// Deprecated: This function manipulates the DefaultGatherer variable. Consider +// the implications, i.e. don't do this concurrently with any uses of the +// DefaultGatherer. In the rare cases where you need to inject MetricFamily +// protobufs directly, it is recommended to use a custom Registry and combine it +// with a custom Gatherer using the Gatherers type (see +// there). SetMetricFamilyInjectionHook only exists for compatibility reasons +// with previous versions of this package. 
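RegisterOrGet and MustRegisterOrGet come back with the downgrade, but even here they are marked deprecated in favour of handling AlreadyRegisteredError directly, as their doc comments above describe. A sketch of that idiom for a counter that more than one code path may try to register (the metric name is illustrative):

~~~ Go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

// registerOrReuse registers c and, if an equal Collector is already present,
// returns the existing one instead; this is the pattern RegisterOrGet wraps.
func registerOrReuse(reg prometheus.Registerer, c prometheus.Counter) prometheus.Counter {
	if err := reg.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			return are.ExistingCollector.(prometheus.Counter)
		}
		panic(err) // some unrelated registration error
	}
	return c
}

func main() {
	newEvents := func() prometheus.Counter {
		return prometheus.NewCounter(prometheus.CounterOpts{
			Name: "myapp_events_total", // illustrative name
			Help: "Events seen by the application.",
		})
	}
	a := registerOrReuse(prometheus.DefaultRegisterer, newEvents())
	b := registerOrReuse(prometheus.DefaultRegisterer, newEvents())
	a.Inc()
	b.Inc() // a and b are the same underlying counter
}
~~~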
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { + DefaultGatherer = Gatherers{ + DefaultGatherer, + GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), + } +} + // AlreadyRegisteredError is returned by the Register method if the Collector to // be registered has already been registered before, or a different Collector // that collects the same metrics has been registered before. Registration fails @@ -225,13 +252,6 @@ func (errs MultiError) Error() string { return buf.String() } -// Append appends the provided error if it is not nil. -func (errs *MultiError) Append(err error) { - if err != nil { - *errs = append(*errs, err) - } -} - // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only // contained error as error if len(errs is 1). In all other cases, it returns // the MultiError directly. This is helpful for returning a MultiError in a way @@ -256,7 +276,6 @@ type Registry struct { collectorsByID map[uint64]Collector // ID is a hash of the descIDs. descIDs map[uint64]struct{} dimHashesByName map[string]uint64 - uncheckedCollectors []Collector pedanticChecksEnabled bool } @@ -274,13 +293,8 @@ func (r *Registry) Register(c Collector) error { close(descChan) }() r.mtx.Lock() - defer func() { - // Drain channel in case of premature return to not leak a goroutine. - for range descChan { - } - r.mtx.Unlock() - }() - // Conduct various tests... + defer r.mtx.Unlock() + // Coduct various tests... for desc := range descChan { // Is the descriptor valid at all? @@ -319,10 +333,9 @@ func (r *Registry) Register(c Collector) error { } } } - // A Collector yielding no Desc at all is considered unchecked. + // Did anything happen at all? if len(newDescIDs) == 0 { - r.uncheckedCollectors = append(r.uncheckedCollectors, c) - return nil + return errors.New("collector has no descriptors") } if existing, exists := r.collectorsByID[collectorID]; exists { return AlreadyRegisteredError{ @@ -396,25 +409,31 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { var ( - checkedMetricChan = make(chan Metric, capMetricChan) - uncheckedMetricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) r.mtx.RLock() - goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - checkedCollectors := make(chan Collector, len(r.collectorsByID)) - uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) 
+ wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() for _, collector := range r.collectorsByID { - checkedCollectors <- collector - } - for _, collector := range r.uncheckedCollectors { - uncheckedCollectors <- collector + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) } + // In case pedantic checks are enabled, we have to copy the map before // giving up the RLock. if r.pedanticChecksEnabled { @@ -423,258 +442,127 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs[id] = struct{}{} } } - r.mtx.RUnlock() - - wg.Add(goroutineBudget) - collectWorker := func() { - for { - select { - case collector := <-checkedCollectors: - collector.Collect(checkedMetricChan) - case collector := <-uncheckedCollectors: - collector.Collect(uncheckedMetricChan) - default: - return - } - wg.Done() - } - } - - // Start the first worker now to make sure at least one is running. - go collectWorker() - goroutineBudget-- - - // Close checkedMetricChan and uncheckedMetricChan once all collectors - // are collected. - go func() { - wg.Wait() - close(checkedMetricChan) - close(uncheckedMetricChan) - }() + r.mtx.RUnlock() - // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. + // Drain metricChan in case of premature return. defer func() { - if checkedMetricChan != nil { - for range checkedMetricChan { - } - } - if uncheckedMetricChan != nil { - for range uncheckedMetricChan { - } + for _ = range metricChan { } }() - // Copy the channel references so we can nil them out later to remove - // them from the select statements below. - cmc := checkedMetricChan - umc := uncheckedMetricChan - - for { - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. + desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + errs = append(errs, fmt.Errorf( + "error collecting metric %v: %s", desc, err, )) - case metric, ok := <-umc: - if !ok { - umc = nil - break + continue + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + errs = append(errs, fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + )) + continue } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - default: - if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { - // All collectors are already being worked on or - // we have already as many goroutines started as - // there are collectors. Do the same as above, - // just without the default. - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, + // TODO(beorn7): Simplify switch once Desc has type. 
+ switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, + continue + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, )) + continue } - break - } - // Start more workers. - go collectWorker() - goroutineBudget-- - runtime.Gosched() - } - // Once both checkedMetricChan and uncheckdMetricChan are closed - // and drained, the contraption above will nil out cmc and umc, - // and then we can leave the collect loop here. - if cmc == nil && umc == nil { - break - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the -// Prometheus text format, and writes it to a temporary file. Upon success, the -// temporary file is renamed to the provided filename. -// -// This is intended for use with the textfile collector of the node exporter. -// Note that the node exporter expects the filename to be suffixed with ".prom". -func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) - if err != nil { - return err - } - defer os.Remove(tmp.Name()) - - mfs, err := g.Gather() - if err != nil { - return err - } - for _, mf := range mfs { - if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { - return err - } - } - if err := tmp.Close(); err != nil { - return err - } - - if err := os.Chmod(tmp.Name(), 0644); err != nil { - return err - } - return os.Rename(tmp.Name(), filename) -} - -// processMetric is an internal helper method only used by the Gather method. -func processMetric( - metric Metric, - metricFamiliesByName map[string]*dto.MetricFamily, - metricHashes map[uint64]struct{}, - registeredDescIDs map[uint64]struct{}, -) error { - desc := metric.Desc() - // Wrapped metrics collected by an unchecked Collector can have an - // invalid Desc. - if desc.err != nil { - return desc.err - } - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { // Existing name. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - ) - } - // TODO(beorn7): Simplify switch once Desc has type. 
- switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - return fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - return fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - ) + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + )) + continue + } + default: + panic("encountered MetricFamily with invalid type") } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - return fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - ) + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + errs = append(errs, fmt.Errorf( + "empty metric collected: %s", dtoMetric, + )) + continue } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - ) + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + if r.pedanticChecksEnabled { + // Is the desc registered at all? + if _, exist := registeredDescIDs[desc.id]; !exist { + errs = append(errs, fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + )) + continue } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - return fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - ) + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + errs = append(errs, err) + continue } - default: - panic("encountered MetricFamily with invalid type") - } - } else { // New name. - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. 
- switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { - return err } - metricFamiliesByName[desc.fqName] = metricFamily + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { - return err - } - if registeredDescIDs != nil { - // Is the desc registered at all? - if _, exist := registeredDescIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - return nil + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } // Gatherers is a slice of Gatherer instances that implements the Gatherer @@ -700,6 +588,7 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { var ( metricFamiliesByName = map[string]*dto.MetricFamily{} metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} errs MultiError // The collected errors to return in the end. ) @@ -736,14 +625,10 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { existingMF.Name = mf.Name existingMF.Help = mf.Help existingMF.Type = mf.Type - if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { - errs = append(errs, err) - continue - } metricFamiliesByName[mf.GetName()] = existingMF } for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { + if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { errs = append(errs, err) continue } @@ -751,80 +636,88 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { } } } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } -// checkSuffixCollisions checks for collisions with the “magic” suffixes the -// Prometheus text format and the internal metric representation of the -// Prometheus server add while flattening Summaries and Histograms. 
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { - var ( - newName = mf.GetName() - newType = mf.GetType() - newNameWithoutSuffix = "" - ) - switch { - case strings.HasSuffix(newName, "_count"): - newNameWithoutSuffix = newName[:len(newName)-6] - case strings.HasSuffix(newName, "_sum"): - newNameWithoutSuffix = newName[:len(newName)-4] - case strings.HasSuffix(newName, "_bucket"): - newNameWithoutSuffix = newName[:len(newName)-7] - } - if newNameWithoutSuffix != "" { - if existingMF, ok := mfs[newNameWithoutSuffix]; ok { - switch existingMF.GetType() { - case dto.MetricType_SUMMARY: - if !strings.HasSuffix(newName, "_bucket") { - return fmt.Errorf( - "collected metric named %q collides with previously collected summary named %q", - newName, newNameWithoutSuffix, - ) - } - case dto.MetricType_HISTOGRAM: - return fmt.Errorf( - "collected metric named %q collides with previously collected histogram named %q", - newName, newNameWithoutSuffix, - ) - } - } +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false } - if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_count"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_count", - ) - } - if _, ok := mfs[newName+"_sum"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_sum", - ) - } + if s[j].TimestampMs == nil { + return true } - if newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_bucket"]; ok { - return fmt.Errorf( - "collected histogram named %q collides with previously collected metric named %q", - newName, newName+"_bucket", - ) + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// normalizeMetricFamilies returns a MetricFamily slice whith empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. 
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) } } - return nil + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result } // checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashes the Metric labels and the MetricFamily -// name. If the resulting hash is already in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. +// provided MetricFamily. It also hashed the Metric labels and the MetricFamily +// name. If the resulting hash is alread in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. The provided dimHashes maps +// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes +// doesn't yet contain a hash for the provided MetricFamily, it is +// added. Otherwise, an error is returned if the existing dimHashes in not equal +// the calculated dimHash. func checkMetricConsistency( metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, metricHashes map[uint64]struct{}, + dimHashes map[string]uint64, ) error { - name := metricFamily.GetName() - // Type consistency with metric family. if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || @@ -832,65 +725,41 @@ func checkMetricConsistency( metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { return fmt.Errorf( - "collected metric %q { %s} is not a %s", - name, dtoMetric, metricFamily.GetType(), + "collected metric %s %s is not a %s", + metricFamily.GetName(), dtoMetric, metricFamily.GetType(), ) } - previousLabelName := "" - for _, labelPair := range dtoMetric.GetLabel() { - labelName := labelPair.GetName() - if labelName == previousLabelName { - return fmt.Errorf( - "collected metric %q { %s} has two or more labels with the same name: %s", - name, dtoMetric, labelName, - ) - } - if !checkLabelName(labelName) { - return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - name, dtoMetric, labelName, - ) - } - if dtoMetric.Summary != nil && labelName == quantileLabel { - return fmt.Errorf( - "collected metric %q { %s} must not have an explicit %q label", - name, dtoMetric, quantileLabel, - ) - } - if !utf8.ValidString(labelPair.GetValue()) { - return fmt.Errorf( - "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - name, dtoMetric, labelName, labelPair.GetValue()) - } - previousLabelName = labelName - } - - // Is the metric unique (i.e. no other metric with the same name and the same labels)? + // Is the metric unique (i.e. no other metric with the same name and the same label values)? h := hashNew() - h = hashAdd(h, name) + h = hashAdd(h, metricFamily.GetName()) h = hashAddByte(h, separatorByte) + dh := hashNew() // Make sure label pairs are sorted. We depend on it for the consistency // check. 
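Note: the dimHashes map reintroduced in this hunk enforces that every sample of a metric family uses the same set of label names, while metricHashes enforces uniqueness of name plus label values. A rough sketch of the name-dimension part of that idea, using hash/fnv instead of the package's internal FNV helpers:

~~~ Go
package main

import (
	"fmt"
	"hash/fnv"
)

// dimHash hashes only the label names of a sample; every sample in a metric
// family has to produce the same value. A second hash over the label values
// (not shown) would additionally have to be unique within the family.
func dimHash(labelNames []string) uint64 {
	h := fnv.New64a()
	for _, name := range labelNames {
		h.Write([]byte(name))
		h.Write([]byte{0xff}) // separator; valid label names never contain 0xff
	}
	return h.Sum64()
}

func main() {
	a := dimHash([]string{"code", "method"})
	b := dimHash([]string{"code", "method"})
	c := dimHash([]string{"code"})
	fmt.Println(a == b, a == c) // true false
}
~~~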
- if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { - // We cannot sort dtoMetric.Label in place as it is immutable by contract. - copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) - copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) - dtoMetric.Label = copiedLabels - } + sort.Sort(LabelPairSorter(dtoMetric.Label)) for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetName()) - h = hashAddByte(h, separatorByte) h = hashAdd(h, lp.GetValue()) h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) } if _, exists := metricHashes[h]; exists { return fmt.Errorf( - "collected metric %q { %s} was collected before with the same name and label values", - name, dtoMetric, + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, ) } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } metricHashes[h] = struct{}{} return nil } @@ -909,8 +778,8 @@ func checkDescConsistency( } // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) - copy(lpsFromDesc, desc.constLabelPairs) + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), @@ -922,7 +791,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(labelPairSorter(lpsFromDesc)) + sort.Sort(LabelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 2980614df..bce05bf9a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -36,10 +36,7 @@ const quantileLabel = "quantile" // // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v0.10 of the library. There will be no rank estimations at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. +// as rank estimations. // // Note that the rank estimations cannot be aggregated in a meaningful way with // the Prometheus query language (i.e. you cannot average or add them). If you @@ -57,9 +54,6 @@ type Summary interface { } // DefObjectives are the default Summary quantile values. -// -// Deprecated: DefObjectives will not be used as the default objectives in -// v0.10 of the library. The default Summary will have no quantiles then. var ( DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} @@ -81,10 +75,8 @@ const ( ) // SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name to a non-empty string. 
While all other fields are -// optional and can safely be left at their zero value, it is recommended to set -// a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v0.10 of the library. +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -95,39 +87,35 @@ type SummaryOpts struct { Subsystem string Name string - // Help provides information about this Summary. + // Help provides information about this Summary. Mandatory! // // Metrics with the same fully-qualified name must have the same Help // string. Help string - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. + // ConstLabels are used to attach fixed labels to this + // Summary. Summaries with the same fully-qualified name must have the + // same label names in their ConstLabels. // - // Due to the way a Summary is represented in the Prometheus text format - // and how it is handled by the Prometheus server internally, “quantile” - // is an illegal label name. Construction of a Summary or SummaryVec - // will panic if this label name is used in ConstLabels. + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // SummaryVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Summaries with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). ConstLabels Labels // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is DefObjectives. It is used if Objectives is left at - // its zero value (i.e. nil). To create a Summary without Objectives, - // set it to an empty map (i.e. map[float64]float64{}). - // - // Deprecated: Note that the current value of DefObjectives is - // deprecated. It will be replaced by an empty map in v0.10 of the - // library. Please explicitly set Objectives to the desired value. + // absolute error. If Objectives[q] = e, then the value reported + // for q will be the φ-quantile value for some φ between q-e and q+e. + // The default value is DefObjectives. 
Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -181,7 +169,7 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(errInconsistentCardinality) } for _, n := range desc.variableLabels { @@ -195,7 +183,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } } - if opts.Objectives == nil { + if len(opts.Objectives) == 0 { opts.Objectives = DefObjectives } @@ -402,21 +390,13 @@ func (s quantSort) Less(i, j int) bool { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewSummaryVec. type SummaryVec struct { - *metricVec + *MetricVec } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. -// -// Due to the way a Summary is represented in the Prometheus text format and how -// it is handled by the Prometheus server internally, “quantile” is an illegal -// label name. NewSummaryVec will panic if this label name is used. +// partitioned by the given label names. At least one label name must be +// provided. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { - if ln == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, @@ -424,116 +404,47 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { opts.ConstLabels, ) return &SummaryVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { return newSummary(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues returns the Summary for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Summary is created. -// -// It is possible to call this method without using the returned Summary to only -// create the new Summary but leave it at its starting value, a Summary without -// any observations. -// -// Keeping the Summary for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Summary from the SummaryVec. In that case, -// the Summary will still exist, but it will not be exported anymore, even if a -// Summary with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. 
The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Observer), err + return metric.(Summary), err } return nil, err } -// GetMetricWith returns the Summary for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Summary is created. Implications of -// creating a Summary without using it and keeping the Summary for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. +func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { + metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { - return metric.(Observer), err + return metric.(Summary), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { - s, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return s +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { + return m.MetricVec.WithLabelValues(lvs...).(Summary) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *SummaryVec) With(labels Labels) Observer { - s, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return s -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the SummaryVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. 
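Note: the downgraded SummaryVec accessors return Summary rather than the Observer interface of v0.9.x, and the Observer-based currying helpers go away, but common WithLabelValues(...).Observe(...) call sites are unchanged. A small usage sketch against the v0.8.0 API; the metric name and label are made up for illustration:

~~~ Go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request latency by method.",
	}, []string{"method"})
	prometheus.MustRegister(latency)

	// WithLabelValues returns a Summary here (an Observer in v0.9.x);
	// the call site reads the same either way.
	latency.WithLabelValues("GET").Observe(0.042)
	latency.With(prometheus.Labels{"method": "POST"}).Observe(0.120)
}
~~~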
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &SummaryVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *SummaryVec) With(labels Labels) Summary { + return m.MetricVec.With(labels).(Summary) } type constSummary struct { @@ -586,7 +497,7 @@ func (s *constSummary) Write(out *dto.Metric) error { // map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. +// consistent with the variable labels in Desc. func NewConstSummary( desc *Desc, count uint64, @@ -594,11 +505,8 @@ func NewConstSummary( quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality } return &constSummary{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go deleted file mode 100644 index 8d5f10523..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "time" - -// Timer is a helper type to time functions. Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the -// following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. The observed -// duration is also returned. ObserveDuration is usually called with a defer -// statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. 
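Note: timer.go is deleted by this downgrade, so prometheus.NewTimer/ObserveDuration are not available in the pinned v0.8.0 release and observations have to be timed by hand. A sketch of the equivalent; the job_duration_seconds Summary is illustrative only:

~~~ Go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	duration := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "job_duration_seconds",
		Help: "How long the job took.",
	})
	prometheus.MustRegister(duration)

	// No prometheus.Timer in v0.8.0, so measure the elapsed time directly.
	start := time.Now()
	doWork()
	duration.Observe(time.Since(start).Seconds())
}

func doWork() { time.Sleep(10 * time.Millisecond) }
~~~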
-func (t *Timer) ObserveDuration() time.Duration { - d := time.Since(t.begin) - if t.observer != nil { - t.observer.Observe(d.Seconds()) - } - return d -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go index 0f9ce63f4..5faf7e6e3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -13,12 +13,108 @@ package prometheus +// Untyped is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// An Untyped metric works the same as a Gauge. The only difference is that to +// no type information is implied. +// +// To create Untyped instances, use NewUntyped. +type Untyped interface { + Metric + Collector + + // Set sets the Untyped metric to an arbitrary value. + Set(float64) + // Inc increments the Untyped metric by 1. + Inc() + // Dec decrements the Untyped metric by 1. + Dec() + // Add adds the given value to the Untyped metric. (The value can be + // negative, resulting in a decrease.) + Add(float64) + // Sub subtracts the given value from the Untyped metric. (The value can + // be negative, resulting in an increase.) + Sub(float64) +} + // UntypedOpts is an alias for Opts. See there for doc comments. type UntypedOpts Opts -// UntypedFunc works like GaugeFunc but the collected metric is of type -// "Untyped". UntypedFunc is useful to mirror an external metric of unknown -// type. +// NewUntyped creates a new Untyped metric from the provided UntypedOpts. +func NewUntyped(opts UntypedOpts) Untyped { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, 0) +} + +// UntypedVec is a Collector that bundles a set of Untyped metrics that all +// share the same Desc, but have different values for their variable +// labels. This is used if you want to count the same thing partitioned by +// various dimensions. Create instances with NewUntypedVec. +type UntypedVec struct { + *MetricVec +} + +// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &UntypedVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Untyped and not a +// Metric so that no type conversion is required. +func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns an Untyped and not a Metric so that no +// type conversion is required. 
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { + return m.MetricVec.WithLabelValues(lvs...).(Untyped) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *UntypedVec) With(labels Labels) Untyped { + return m.MetricVec.With(labels).(Untyped) +} + +// UntypedFunc is an Untyped whose value is determined at collect time by +// calling a provided function. // // To create UntypedFunc instances, use NewUntypedFunc. type UntypedFunc interface { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index eb248f108..a944c3775 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,12 +14,15 @@ package prometheus import ( + "errors" "fmt" + "math" "sort" - - "github.com/golang/protobuf/proto" + "sync/atomic" dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" ) // ValueType is an enumeration of metric types that represent a simple value. @@ -33,6 +36,77 @@ const ( UntypedValue ) +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. +type value struct { + // valBits containst the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + valType ValueType + labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. 
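Note: the untyped.go hunk above restores the settable Untyped metric and UntypedVec that were reduced to UntypedFunc in v0.9.x. A brief usage sketch against the restored v0.8.0 API; both metrics are hypothetical:

~~~ Go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A plain Untyped metric behaves like a Gauge without implying a type.
	mirror := prometheus.NewUntyped(prometheus.UntypedOpts{
		Name: "mirrored_external_value",
		Help: "Value mirrored from an external system of unknown type.",
	})
	mirror.Set(42)

	// UntypedVec partitions the same metric by label values.
	byShard := prometheus.NewUntypedVec(prometheus.UntypedOpts{
		Name: "mirrored_external_value_by_shard",
		Help: "Per-shard mirrored value.",
	}, []string{"shard"})
	byShard.WithLabelValues("0").Add(1)

	prometheus.MustRegister(mirror, byShard)
}
~~~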
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { + if len(labelValues) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &value{ + desc: desc, + valType: valueType, + valBits: math.Float64bits(val), + labelPairs: makeLabelPairs(desc, labelValues), + } + result.init(result) + return result +} + +func (v *value) Desc() *Desc { + return v.desc +} + +func (v *value) Set(val float64) { + atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) Inc() { + v.Add(1) +} + +func (v *value) Dec() { + v.Add(-1) +} + +func (v *value) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&v.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { + return + } + } +} + +func (v *value) Sub(val float64) { + v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) + return populateMetric(v.valType, val, v.labelPairs, out) +} + // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. This is a low-level building block used by the @@ -77,14 +151,10 @@ func (v *valueFunc) Write(out *dto.Metric) error { // operations. However, when implementing custom Collectors, it is useful as a // throw-away metric that is generated on the fly to send it to Prometheus in // the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc or if Desc is -// invalid. +// labelValues is not consistent with the variable labels in Desc. func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality } return &constMetric{ desc: desc, @@ -156,7 +226,9 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { Value: proto.String(labelValues[i]), }) } - labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) return labelPairs } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 14ed9e856..7f3eef9a4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,253 +20,200 @@ import ( "github.com/prometheus/common/model" ) -// metricVec is a Collector to bundle metrics of the same name that differ in -// their label values. metricVec is not used directly (and therefore -// unexported). It is used as a building block for implementations of vectors of -// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. -// It also handles label currying. It uses basicMetricVec internally. -type metricVec struct { - *metricMap - - curry []curriedLabelValue - - // hashAdd and hashAddByte can be replaced for testing collision handling. 
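Note: the restored value type keeps its float64 as raw bits in a uint64 so that Set and Add can be lock-free. A self-contained sketch of the same compare-and-swap technique, outside the library:

~~~ Go
package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// atomicFloat stores the float64 bits in a uint64 so sync/atomic can operate
// on it, mirroring the technique used by value above.
type atomicFloat struct {
	bits uint64 // must stay first for atomic alignment on 32-bit platforms
}

func (f *atomicFloat) Add(delta float64) {
	for {
		oldBits := atomic.LoadUint64(&f.bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + delta)
		if atomic.CompareAndSwapUint64(&f.bits, oldBits, newBits) {
			return
		}
	}
}

func (f *atomicFloat) Load() float64 {
	return math.Float64frombits(atomic.LoadUint64(&f.bits))
}

func main() {
	var f atomicFloat
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f.Add(0.5)
		}()
	}
	wg.Wait()
	fmt.Println(f.Load()) // 50
}
~~~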
- hashAdd func(h uint64, s string) uint64 +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects the children. + children map[uint64][]metricWithLabelValues + desc *Desc + + newMetric func(labelValues ...string) Metric + hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling hashAddByte func(h uint64, b byte) uint64 } -// newMetricVec returns an initialized metricVec. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { - return &metricVec{ - metricMap: &metricMap{ - metrics: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - }, +// newMetricVec returns an initialized MetricVec. The concrete value is +// returned for embedding into another struct. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { + return &MetricVec{ + children: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, hashAdd: hashAdd, hashAddByte: hashAddByte, } } -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *metricVec) DeleteLabelValues(lvs ...string) bool { - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric } -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. However, such inconsistent Labels -// can never match an actual metric, so the method will always return false in -// that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *metricVec) Delete(labels Labels) bool { - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +// Describe implements Collector. The length of the returned slice +// is always one. 
+func (m *MetricVec) Describe(ch chan<- *Desc) { + ch <- m.desc } -func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { - var ( - newCurry []curriedLabelValue - oldCurry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { - if ok { - return nil, fmt.Errorf("label name %q is already curried", label) - } - newCurry = append(newCurry, oldCurry[iCurry]) - iCurry++ - } else { - if !ok { - continue // Label stays uncurried. - } - newCurry = append(newCurry, curriedLabelValue{i, val}) +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.children { + for _, metric := range metrics { + ch <- metric.metric } } - if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { - return nil, fmt.Errorf("%d unknown label(s) found during currying", l) - } - - return &metricVec{ - metricMap: m.metricMap, - curry: newCurry, - hashAdd: m.hashAdd, - hashAddByte: m.hashAddByte, - }, nil } -func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { h, err := m.hashLabelValues(lvs) if err != nil { return nil, err } - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil + return m.getOrCreateMetricWithLabelValues(h, lvs), nil } -func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). 
See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { h, err := m.hashLabels(labels) if err != nil { return nil, err } - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil + return m.getOrCreateMetricWithLabels(h, labels), nil } -func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iVals, iCurry int - ) - for i := 0; i < len(m.desc.variableLabels); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - h = m.hashAdd(h, vals[iVals]) - iVals++ - } - h = m.hashAddByte(h, model.SeparatorByte) +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) } - return h, nil + return metric } -func (m *metricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(curry) && curry[iCurry].index == i { - if ok { - return 0, fmt.Errorf("label name %q is already curried", label) - } - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - } - h = m.hashAddByte(h, model.SeparatorByte) +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) + if err != nil { + panic(err) } - return h, nil -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// curriedLabelValue sets the curried value for a label at the given index. -type curriedLabelValue struct { - index int - value string -} - -// metricMap is a helper for metricVec and shared between differently curried -// metricVecs. -type metricMap struct { - mtx sync.RWMutex // Protects metrics. - metrics map[uint64][]metricWithLabelValues - desc *Desc - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. It will send exactly one Desc to the provided -// channel. -func (m *metricMap) Describe(ch chan<- *Desc) { - ch <- m.desc + return metric } -// Collect implements Collector. -func (m *metricMap) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. 
+// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + m.mtx.Lock() + defer m.mtx.Unlock() - for _, metrics := range m.metrics { - for _, metric := range metrics { - ch <- metric.metric - } + h, err := m.hashLabelValues(lvs) + if err != nil { + return false } + return m.deleteByHashWithLabelValues(h, lvs) } -// Reset deletes all metrics in this vector. -func (m *metricMap) Reset() { +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in the Desc of the MetricVec. However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *MetricVec) Delete(labels Labels) bool { m.mtx.Lock() defer m.mtx.Unlock() - for h := range m.metrics { - delete(m.metrics, h) + h, err := m.hashLabels(labels) + if err != nil { + return false } + + return m.deleteByHashWithLabels(h, labels) } // deleteByHashWithLabelValues removes the metric from the hash bucket h. If // there are multiple matches in the bucket, use lvs to select a metric and // remove only that metric. -func (m *metricMap) deleteByHashWithLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] +func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { + metrics, ok := m.children[h] if !ok { return false } - i := findMetricWithLabelValues(metrics, lvs, curry) + i := m.findMetricWithLabelValues(metrics, lvs) if i >= len(metrics) { return false } if len(metrics) > 1 { - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + m.children[h] = append(metrics[:i], metrics[i+1:]...) } else { - delete(m.metrics, h) + delete(m.children, h) } return true } @@ -274,38 +221,69 @@ func (m *metricMap) deleteByHashWithLabelValues( // deleteByHashWithLabels removes the metric from the hash bucket h. If there // are multiple matches in the bucket, use lvs to select a metric and remove // only that metric. -func (m *metricMap) deleteByHashWithLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] +func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { + metrics, ok := m.children[h] if !ok { return false } - i := findMetricWithLabels(m.desc, metrics, labels, curry) + i := m.findMetricWithLabels(metrics, labels) if i >= len(metrics) { return false } if len(metrics) > 1 { - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + m.children[h] = append(metrics[:i], metrics[i+1:]...) } else { - delete(m.metrics, h) + delete(m.children, h) } return true } +// Reset deletes all metrics in this vector. 
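Note: as the restored DeleteLabelValues/Delete doc comments say, a mismatched label count is not an error, it simply never matches anything. A short sketch with a hypothetical GaugeVec (all vector types embed MetricVec in v0.8.0, so the same methods apply):

~~~ Go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	temps := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "temperature_celsius",
		Help: "Temperature by room.",
	}, []string{"room"})

	temps.WithLabelValues("kitchen").Set(22.5)
	temps.With(prometheus.Labels{"room": "cellar"}).Set(11.0)

	fmt.Println(temps.DeleteLabelValues("kitchen"))               // true: exact match
	fmt.Println(temps.DeleteLabelValues("kitchen", "extra"))      // false: wrong label count
	fmt.Println(temps.Delete(prometheus.Labels{"room": "attic"})) // false: no such child

	temps.Reset() // drops every remaining child metric
}
~~~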
+func (m *MetricVec) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.children { + delete(m.children, h) + } +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if len(vals) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + h := hashNew() + for _, val := range vals { + h = m.hashAdd(h, val) + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if len(labels) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + h := hashNew() + for _, label := range m.desc.variableLabels { + val, ok := labels[label] + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabelValues( - hash uint64, lvs []string, curry []curriedLabelValue, -) Metric { +func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + metric, ok := m.getMetricWithLabelValues(hash, lvs) m.mtx.RUnlock() if ok { return metric @@ -313,11 +291,13 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( m.mtx.Lock() defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + metric, ok = m.getMetricWithLabelValues(hash, lvs) if !ok { - inlinedLVs := inlineLabelValues(lvs, curry) - metric = m.newMetric(inlinedLVs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + // Copy to avoid allocation in case wo don't go down this code path. + copiedLVs := make([]string, len(lvs)) + copy(copiedLVs, lvs) + metric = m.newMetric(copiedLVs...) + m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) } return metric } @@ -326,11 +306,9 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( // or creates it and returns the new one. // // This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabels( - hash uint64, labels Labels, curry []curriedLabelValue, -) Metric { +func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + metric, ok := m.getMetricWithLabels(hash, labels) m.mtx.RUnlock() if ok { return metric @@ -338,37 +316,33 @@ func (m *metricMap) getOrCreateMetricWithLabels( m.mtx.Lock() defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + metric, ok = m.getMetricWithLabels(hash, labels) if !ok { - lvs := extractLabelValues(m.desc, labels, curry) + lvs := m.extractLabelValues(labels) metric = m.newMetric(lvs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) } return metric } -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex. 
-func (m *metricMap) getMetricWithHashAndLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] +// getMetricWithLabelValues gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { + metrics, ok := m.children[h] if ok { - if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { return metrics[i].metric, true } } return nil, false } -// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// getMetricWithLabels gets a metric while handling possible collisions in // the hash space. Must be called while holding read mutex. -func (m *metricMap) getMetricWithHashAndLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] +func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { + metrics, ok := m.children[h] if ok { - if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { return metrics[i].metric, true } } @@ -377,11 +351,9 @@ func (m *metricMap) getMetricWithHashAndLabels( // findMetricWithLabelValues returns the index of the matching metric or // len(metrics) if not found. -func findMetricWithLabelValues( - metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, -) int { +func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { for i, metric := range metrics { - if matchLabelValues(metric.values, lvs, curry) { + if m.matchLabelValues(metric.values, lvs) { return i } } @@ -390,51 +362,32 @@ func findMetricWithLabelValues( // findMetricWithLabels returns the index of the matching metric or len(metrics) // if not found. 
-func findMetricWithLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { +func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { for i, metric := range metrics { - if matchLabels(desc, metric.values, labels, curry) { + if m.matchLabels(metric.values, labels) { return i } } return len(metrics) } -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { - if len(values) != len(lvs)+len(curry) { +func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { + if len(values) != len(lvs) { return false } - var iLVs, iCurry int for i, v := range values { - if iCurry < len(curry) && curry[iCurry].index == i { - if v != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if v != lvs[iLVs] { + if v != lvs[i] { return false } - iLVs++ } return true } -func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - if len(values) != len(labels)+len(curry) { +func (m *MetricVec) matchLabels(values []string, labels Labels) bool { + if len(labels) != len(values) { return false } - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - if values[i] != curry[iCurry].value { - return false - } - iCurry++ - continue - } + for i, k := range m.desc.variableLabels { if values[i] != labels[k] { return false } @@ -442,31 +395,10 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe return true } -func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { - labelValues := make([]string, len(labels)+len(curry)) - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } +func (m *MetricVec) extractLabelValues(labels Labels) []string { + labelValues := make([]string, len(labels)) + for i, k := range m.desc.variableLabels { labelValues[i] = labels[k] } return labelValues } - -func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { - labelValues := make([]string, len(lvs)+len(curry)) - var iCurry, iLVs int - for i := range labelValues { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = lvs[iLVs] - iLVs++ - } - return labelValues -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go deleted file mode 100644 index 49159bf3e..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "fmt" - "sort" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// WrapRegistererWith returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided Labels to all Metrics it collects (as -// ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. -// -// WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. It should not be used to add fixed labels to all metrics exposed. -// -// The Collector example demonstrates a use of WrapRegistererWith. -func WrapRegistererWith(labels Labels, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - labels: labels, - } -} - -// WrapRegistererWithPrefix returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided prefix to the name of all Metrics it collects. -// -// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of -// a sub-system. To make this work, register metrics of the sub-system with the -// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful -// to use the same prefix for all metrics exposed. In particular, do not prefix -// metric names that are standardized across applications, as that would break -// horizontal monitoring, for example the metrics provided by the Go collector -// (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, -// respectively.) 
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - prefix: prefix, - } -} - -type wrappingRegisterer struct { - wrappedRegisterer Registerer - prefix string - labels Labels -} - -func (r *wrappingRegisterer) Register(c Collector) error { - return r.wrappedRegisterer.Register(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -func (r *wrappingRegisterer) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -func (r *wrappingRegisterer) Unregister(c Collector) bool { - return r.wrappedRegisterer.Unregister(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -type wrappingCollector struct { - wrappedCollector Collector - prefix string - labels Labels -} - -func (c *wrappingCollector) Collect(ch chan<- Metric) { - wrappedCh := make(chan Metric) - go func() { - c.wrappedCollector.Collect(wrappedCh) - close(wrappedCh) - }() - for m := range wrappedCh { - ch <- &wrappingMetric{ - wrappedMetric: m, - prefix: c.prefix, - labels: c.labels, - } - } -} - -func (c *wrappingCollector) Describe(ch chan<- *Desc) { - wrappedCh := make(chan *Desc) - go func() { - c.wrappedCollector.Describe(wrappedCh) - close(wrappedCh) - }() - for desc := range wrappedCh { - ch <- wrapDesc(desc, c.prefix, c.labels) - } -} - -type wrappingMetric struct { - wrappedMetric Metric - prefix string - labels Labels -} - -func (m *wrappingMetric) Desc() *Desc { - return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) -} - -func (m *wrappingMetric) Write(out *dto.Metric) error { - if err := m.wrappedMetric.Write(out); err != nil { - return err - } - if len(m.labels) == 0 { - // No wrapping labels. - return nil - } - for ln, lv := range m.labels { - out.Label = append(out.Label, &dto.LabelPair{ - Name: proto.String(ln), - Value: proto.String(lv), - }) - } - sort.Sort(labelPairSorter(out.Label)) - return nil -} - -func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { - constLabels := Labels{} - for _, lp := range desc.constLabelPairs { - constLabels[*lp.Name] = *lp.Value - } - for ln, lv := range labels { - if _, alreadyUsed := constLabels[ln]; alreadyUsed { - return &Desc{ - fqName: desc.fqName, - help: desc.help, - variableLabels: desc.variableLabels, - constLabelPairs: desc.constLabelPairs, - err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), - } - } - constLabels[ln] = lv - } - // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) - // Propagate errors if there was any. This will override any errer - // created by NewDesc above, i.e. earlier errors get precedence. - if desc.err != nil { - newDesc.err = desc.err - } - return newDesc -} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go index 96a6b61d9..eb0bdad35 100644 --- a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go +++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -1,4 +1,4 @@ -// // +build ignore +/* // +build ignore */ // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. 
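The vec.go hunks earlier in this patch swap the curried metricMap internals of client_golang v0.9.2 for the older MetricVec hashing and lookup code of v0.8.0. The caller-facing *Vec lookups behave the same in both versions; the sketch below (the metric name and labels are illustrative only and do not come from this change) shows the calls that end up in the hashLabelValues / getOrCreateMetricWith* paths shown above.

~~~ Go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A vector of counters partitioned by two variable labels.
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total", // illustrative name, not from this diff
			Help: "Requests handled, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(requests)

	// Both lookups below hash the supplied label values and then compare them
	// element-wise to resolve collisions, as in the hunks above, creating the
	// child counter on first use and reusing it afterwards.
	requests.WithLabelValues("200", "GET").Inc()
	requests.With(prometheus.Labels{"code": "404", "method": "POST"}).Inc()
}
~~~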
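The wrap.go file deleted above holds the registerer-wrapping helpers that belong to the v0.9 line and are not present in v0.8.0, so the downgrade removes them. As a hedged illustration of what that API provided (the prefix and label values here are invented for the example and appear nowhere in this change):

~~~ Go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Wrap the default registerer so every metric registered through `reg`
	// gets a name prefix and an extra constant label, without changing the
	// metric definitions themselves.
	reg := prometheus.WrapRegistererWith(
		prometheus.Labels{"component": "example"}, // illustrative label
		prometheus.WrapRegistererWithPrefix("myapp_", prometheus.DefaultRegisterer),
	)

	reg.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "build_info",
		Help: "Build information.",
	}))
	// Once registered, the gauge is exported as
	// myapp_build_info{component="example"}.
}
~~~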
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl index 9b276d420..ad99f6671 100644 --- a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl @@ -1,4 +1,4 @@ -// // +build ignore +/* // +build ignore */ // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go index 1a27675fd..c4944dbff 100644 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -165,15 +165,9 @@ type genRunner struct { // // Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { - // trim out all types which already implement Selfer - typ2 := make([]reflect.Type, 0, len(typ)) - for _, t := range typ { - if reflect.PtrTo(t).Implements(selferTyp) || t.Implements(selferTyp) { - continue - } - typ2 = append(typ2, t) - } - typ = typ2 + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer if len(typ) == 0 { return diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go index 231d503a2..d07208c87 100644 --- a/vendor/github.com/ugorji/go/codec/simple.go +++ b/vendor/github.com/ugorji/go/codec/simple.go @@ -347,7 +347,7 @@ func (d *simpleDecDriver) decLen() int { } return int(ui) } - d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) return -1 } diff --git a/vendor/github.com/ugorji/go/codec/tests.sh b/vendor/github.com/ugorji/go/codec/tests.sh index 00857b620..342f336df 100755 --- a/vendor/github.com/ugorji/go/codec/tests.sh +++ b/vendor/github.com/ugorji/go/codec/tests.sh @@ -59,7 +59,7 @@ _run() { } # echo ">>>>>>> RUNNING VARIATIONS OF TESTS" -if [[ "x$@" = "x" ]]; then +if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then # All: r, x, g, gu _run "-_tcinsed_ml" # regular _run "-_tcinsed_ml_z" # regular with reset @@ -75,6 +75,28 @@ elif [[ "x$@" = "x-F" ]]; then # regular with notfastpath _run "-_tcinsed_ml_f" # regular _run "-_tcinsed_ml_zf" # regular with reset +elif [[ "x$@" = "x-C" ]]; then + # codecgen + _run "-gx_tcinsed_ml" # codecgen: requires external + _run "-gxu_tcinsed_ml" # codecgen + unsafe +elif [[ "x$@" = "x-X" ]]; then + # external + _run "-x_tcinsed_ml" # external +elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then + cat <